language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/util/check_serialize.py | {
"start": 707,
"end": 8440
} | class ____:
"""Represents the serialization 'frame'.
Attributes:
obj: The object that fails serialization.
name: The variable name of the object.
parent: The object that references the `obj`.
"""
def __init__(self, obj: Any, name: str, parent: Any):
self.obj = obj
self.name = name
self.parent = parent
def __repr__(self):
return f"FailTuple({self.name} [obj={self.obj}, parent={self.parent}])"
def _inspect_func_serialization(base_obj, depth, parent, failure_set, printer):
"""Adds the first-found non-serializable element to the failure_set."""
assert inspect.isfunction(base_obj)
closure = inspect.getclosurevars(base_obj)
found = False
if closure.globals:
printer.print(
f"Detected {len(closure.globals)} global variables. "
"Checking serializability..."
)
with printer.indent():
for name, obj in closure.globals.items():
serializable, _ = _inspect_serializability(
obj,
name=name,
depth=depth - 1,
parent=parent,
failure_set=failure_set,
printer=printer,
)
found = found or not serializable
if found:
break
if closure.nonlocals:
printer.print(
f"Detected {len(closure.nonlocals)} nonlocal variables. "
"Checking serializability..."
)
with printer.indent():
for name, obj in closure.nonlocals.items():
serializable, _ = _inspect_serializability(
obj,
name=name,
depth=depth - 1,
parent=parent,
failure_set=failure_set,
printer=printer,
)
found = found or not serializable
if found:
break
if not found:
printer.print(
f"WARNING: Did not find non-serializable object in {base_obj}. "
"This may be an oversight."
)
return found
def _inspect_generic_serialization(base_obj, depth, parent, failure_set, printer):
"""Adds the first-found non-serializable element to the failure_set."""
assert not inspect.isfunction(base_obj)
functions = inspect.getmembers(base_obj, predicate=inspect.isfunction)
found = False
with printer.indent():
for name, obj in functions:
serializable, _ = _inspect_serializability(
obj,
name=name,
depth=depth - 1,
parent=parent,
failure_set=failure_set,
printer=printer,
)
found = found or not serializable
if found:
break
with printer.indent():
members = inspect.getmembers(base_obj)
for name, obj in members:
if name.startswith("__") and name.endswith("__") or inspect.isbuiltin(obj):
continue
serializable, _ = _inspect_serializability(
obj,
name=name,
depth=depth - 1,
parent=parent,
failure_set=failure_set,
printer=printer,
)
found = found or not serializable
if found:
break
if not found:
printer.print(
f"WARNING: Did not find non-serializable object in {base_obj}. "
"This may be an oversight."
)
return found
@DeveloperAPI
def inspect_serializability(
base_obj: Any,
name: Optional[str] = None,
depth: int = 3,
print_file: Optional[Any] = None,
) -> Tuple[bool, Set[FailureTuple]]:
"""Identifies what objects are preventing serialization.
Args:
base_obj: Object to be serialized.
name: Optional name of string.
depth: Depth of the scope stack to walk through. Defaults to 3.
print_file: file argument that will be passed to print().
Returns:
bool: True if serializable.
set[FailureTuple]: Set of unserializable objects.
.. versionadded:: 1.1.0
"""
printer = _Printer(print_file)
return _inspect_serializability(base_obj, name, depth, None, None, printer)
def _inspect_serializability(
base_obj, name, depth, parent, failure_set, printer
) -> Tuple[bool, Set[FailureTuple]]:
colorama.init()
top_level = False
declaration = ""
found = False
if failure_set is None:
top_level = True
failure_set = set()
declaration = f"Checking Serializability of {base_obj}"
printer.print("=" * min(len(declaration), 80))
printer.print(declaration)
printer.print("=" * min(len(declaration), 80))
if name is None:
name = str(base_obj)
else:
printer.print(f"Serializing '{name}' {base_obj}...")
try:
cp.dumps(base_obj)
return True, failure_set
except Exception as e:
printer.print(
f"{colorama.Fore.RED}!!! FAIL{colorama.Fore.RESET} " f"serialization: {e}"
)
found = True
try:
if depth == 0:
failure_set.add(FailureTuple(base_obj, name, parent))
# Some objects may not be hashable, so we skip adding this to the set.
except Exception:
pass
if depth <= 0:
return False, failure_set
# TODO: we only differentiate between 'function' and 'object'
# but we should do a better job of diving into something
# more specific like a Type, Object, etc.
if inspect.isfunction(base_obj):
_inspect_func_serialization(
base_obj,
depth=depth,
parent=base_obj,
failure_set=failure_set,
printer=printer,
)
else:
_inspect_generic_serialization(
base_obj,
depth=depth,
parent=base_obj,
failure_set=failure_set,
printer=printer,
)
if not failure_set:
failure_set.add(FailureTuple(base_obj, name, parent))
if top_level:
printer.print("=" * min(len(declaration), 80))
if not failure_set:
printer.print(
"Nothing failed the inspect_serialization test, though "
"serialization did not succeed."
)
else:
fail_vars = (
f"\n\n\t{colorama.Style.BRIGHT}"
+ "\n".join(str(k) for k in failure_set)
+ f"{colorama.Style.RESET_ALL}\n\n"
)
printer.print(
f"Variable: {fail_vars}was found to be non-serializable. "
"There may be multiple other undetected variables that were "
"non-serializable. "
)
printer.print(
"Consider either removing the "
"instantiation/imports of these variables or moving the "
"instantiation into the scope of the function/class. "
)
printer.print("=" * min(len(declaration), 80))
printer.print(
"Check https://docs.ray.io/en/master/ray-core/objects/serialization.html#troubleshooting for more information." # noqa
)
printer.print(
"If you have any suggestions on how to improve "
"this error message, please reach out to the "
"Ray developers on github.com/ray-project/ray/issues/"
)
printer.print("=" * min(len(declaration), 80))
return not found, failure_set
| FailureTuple |
python | pytorch__pytorch | test/onnx/model_defs/rnn_model_with_packed_sequence.py | {
"start": 611,
"end": 1117
} | class ____(nn.Module):
def __init__(self, model, batch_first):
super().__init__()
self.model = model
self.batch_first = batch_first
def forward(self, input, seq_lengths):
input = rnn_utils.pack_padded_sequence(input, seq_lengths, self.batch_first)
rets = self.model(input)
ret, rets = rets[0], rets[1:]
ret, _ = rnn_utils.pad_packed_sequence(ret, self.batch_first)
return list([ret] + list(rets))
| RnnModelWithPackedSequenceWithoutState |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/ExampleApp.py | {
"start": 3127,
"end": 9273
} | class ____(QSyntaxHighlighter):
"""Syntax highlighter for the Python language.
"""
# Python keywords
keywords = keyword.kwlist
# Python operators
operators = [
r'=',
# Comparison
r'==', r'!=', r'<', r'<=', r'>', r'>=',
# Arithmetic
r'\+', r"-", r'\*', r'/', r'//', r'%', r'\*\*',
# In-place
r'\+=', r'-=', r'\*=', r'/=', r'\%=',
# Bitwise
r'\^', r'\|', r'&', r'~', r'>>', r'<<',
]
# Python braces
braces = [
r'\{', r'\}', r'\(', r'\)', r'\[', r'\]',
]
def __init__(self, document):
super().__init__(document)
# Multi-line strings (expression, flag, style)
self.tri_single = (QRegularExpression("'''"), 1, 'string2')
self.tri_double = (QRegularExpression('"""'), 2, 'string2')
rules = []
# Keyword, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, 'keyword')
for w in PythonHighlighter.keywords]
rules += [(o, 0, 'operator')
for o in PythonHighlighter.operators]
rules += [(b, 0, 'brace')
for b in PythonHighlighter.braces]
# All other rules
rules += [
# 'self'
(r'\bself\b', 0, 'self'),
# 'def' followed by an identifier
(r'\bdef\b\s*(\w+)', 1, 'defclass'),
# 'class' followed by an identifier
(r'\bclass\b\s*(\w+)', 1, 'defclass'),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, 'numbers'),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, 'numbers'),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, 'numbers'),
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, 'string'),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, 'string'),
# From '#' until a newline
(r'#[^\n]*', 0, 'comment'),
]
self.rules = rules
self.searchText = None
@property
def styles(self):
app = QtWidgets.QApplication.instance()
return DARK_STYLES if app.property('darkMode') else LIGHT_STYLES
def highlightBlock(self, text):
"""Apply syntax highlighting to the given block of text.
"""
# Do other syntax formatting
rules = self.rules.copy()
for expression, nth, format in rules:
format = self.styles[format]
for n, match in enumerate(re.finditer(expression, text)):
if n < nth:
continue
start = match.start()
length = match.end() - start
self.setFormat(start, length, format)
self.applySearchHighlight(text)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings.
=========== ==========================================================
delimiter (QRegularExpression) for triple-single-quotes or
triple-double-quotes
in_state (int) to represent the corresponding state changes when
inside those strings. Returns True if we're still inside a
multi-line string when this function is finished.
style (str) representation of the kind of style to use
=========== ==========================================================
"""
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
match = delimiter.match(text)
start = match.capturedStart()
# Move past this match
add = match.capturedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
match = delimiter.match(text, start + add)
end = match.capturedEnd()
# Ending delimiter on this line?
if end >= add:
length = end - start + add + match.capturedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
length = len(text) - start + add
# Apply formatting
self.setFormat(start, length, self.styles[style])
# Highlighting sits on top of this formatting
# Look for the next match
match = delimiter.match(text, start + length)
start = match.capturedStart()
self.applySearchHighlight(text)
# Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True
else:
return False
def applySearchHighlight(self, text):
if not self.searchText:
return
expr = f'(?i){self.searchText}'
palette: QtGui.QPalette = app.palette()
color = palette.highlight().color()
fgndColor = palette.color(palette.ColorGroup.Current,
palette.ColorRole.Text).name()
style = charFormat(fgndColor, background=color.name())
for match in re.finditer(expr, text):
start = match.start()
length = match.end() - start
self.setFormat(start, length, style)
def unnestedDict(exDict):
"""Converts a dict-of-dicts to a singly nested dict for non-recursive parsing"""
out = {}
for kk, vv in exDict.items():
if isinstance(vv, dict):
out.update(unnestedDict(vv))
else:
out[kk] = vv
return out
| PythonHighlighter |
python | Textualize__textual | examples/mother.py | {
"start": 809,
"end": 913
} | class ____(Markdown):
"""Markdown for the reply from the LLM."""
BORDER_TITLE = "Mother"
| Response |
python | keras-team__keras | keras/src/saving/saving_lib.py | {
"start": 32905,
"end": 34873
} | class ____:
"""Asset store backed by disk storage.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the full path of
the target directory.
"""
def __init__(self, root_path, archive=None, mode=None):
self.mode = mode
self.root_path = root_path
self.archive = archive
self.tmp_dir = None
if self.archive:
self.tmp_dir = get_temp_dir()
if self.mode == "r":
file_utils.extract_open_archive(self.archive, self.tmp_dir)
self.working_dir = file_utils.join(
self.tmp_dir, self.root_path
).replace("\\", "/")
if self.mode == "w":
file_utils.makedirs(self.working_dir)
else:
if mode == "r":
self.working_dir = root_path
else:
self.tmp_dir = get_temp_dir()
self.working_dir = file_utils.join(
self.tmp_dir, self.root_path
).replace("\\", "/")
file_utils.makedirs(self.working_dir)
def make(self, path):
if not path:
return self.working_dir
path = file_utils.join(self.working_dir, path).replace("\\", "/")
if not file_utils.exists(path):
file_utils.makedirs(path)
return path
def get(self, path):
if not path:
return self.working_dir
path = file_utils.join(self.working_dir, path).replace("\\", "/")
if file_utils.exists(path):
return path
return None
def close(self):
if self.mode == "w" and self.archive:
_write_to_zip_recursively(
self.archive, self.working_dir, self.root_path
)
if self.tmp_dir and file_utils.exists(self.tmp_dir):
file_utils.rmtree(self.tmp_dir)
| DiskIOStore |
python | matplotlib__matplotlib | lib/matplotlib/bezier.py | {
"start": 5251,
"end": 19248
} | class ____:
"""
A d-dimensional Bézier segment.
A BezierSegment can be called with an argument, either a scalar or an array-like
object, to evaluate the curve at that/those location(s).
Parameters
----------
control_points : (N, d) array
Location of the *N* control points.
"""
def __init__(self, control_points):
self._cpoints = np.asarray(control_points)
self._N, self._d = self._cpoints.shape
self._orders = np.arange(self._N)
coeff = [math.factorial(self._N - 1)
// (math.factorial(i) * math.factorial(self._N - 1 - i))
for i in range(self._N)]
self._px = (self._cpoints.T * coeff).T
def __call__(self, t):
"""
Evaluate the Bézier curve at point(s) *t* in [0, 1].
Parameters
----------
t : (k,) array-like
Points at which to evaluate the curve.
Returns
-------
(k, d) array
Value of the curve for each point in *t*.
"""
t = np.asarray(t)
return (np.power.outer(1 - t, self._orders[::-1])
* np.power.outer(t, self._orders)) @ self._px
@_api.deprecated(
"3.11", alternative="Call the BezierSegment object with an argument.")
def point_at_t(self, t):
"""
Evaluate the curve at a single point, returning a tuple of *d* floats.
"""
return tuple(self(t))
@property
def control_points(self):
"""The control points of the curve."""
return self._cpoints
@property
def dimension(self):
"""The dimension of the curve."""
return self._d
@property
def degree(self):
"""Degree of the polynomial. One less the number of control points."""
return self._N - 1
@property
def polynomial_coefficients(self):
r"""
The polynomial coefficients of the Bézier curve.
.. warning:: Follows opposite convention from `numpy.polyval`.
Returns
-------
(n+1, d) array
Coefficients after expanding in polynomial basis, where :math:`n`
is the degree of the Bézier curve and :math:`d` its dimension.
These are the numbers (:math:`C_j`) such that the curve can be
written :math:`\sum_{j=0}^n C_j t^j`.
Notes
-----
The coefficients are calculated as
.. math::
{n \choose j} \sum_{i=0}^j (-1)^{i+j} {j \choose i} P_i
where :math:`P_i` are the control points of the curve.
"""
n = self.degree
# matplotlib uses n <= 4. overflow plausible starting around n = 15.
if n > 10:
warnings.warn("Polynomial coefficients formula unstable for high "
"order Bezier curves!", RuntimeWarning)
P = self.control_points
j = np.arange(n+1)[:, None]
i = np.arange(n+1)[None, :] # _comb is non-zero for i <= j
prefactor = (-1)**(i + j) * _comb(j, i) # j on axis 0, i on axis 1
return _comb(n, j) * prefactor @ P # j on axis 0, self.dimension on 1
def axis_aligned_extrema(self):
"""
Return the dimension and location of the curve's interior extrema.
The extrema are the points along the curve where one of its partial
derivatives is zero.
Returns
-------
dims : array of int
Index :math:`i` of the partial derivative which is zero at each
interior extrema.
dzeros : array of float
Of same size as dims. The :math:`t` such that :math:`d/dx_i B(t) =
0`
"""
n = self.degree
if n <= 1:
return np.array([]), np.array([])
Cj = self.polynomial_coefficients
dCj = np.arange(1, n+1)[:, None] * Cj[1:]
dims = []
roots = []
for i, pi in enumerate(dCj.T):
r = np.roots(pi[::-1])
roots.append(r)
dims.append(np.full_like(r, i))
roots = np.concatenate(roots)
dims = np.concatenate(dims)
in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1)
return dims[in_range], np.real(roots)[in_range]
def split_bezier_intersecting_with_closedpath(
bezier, inside_closedpath, tolerance=0.01):
"""
Split a Bézier curve into two at the intersection with a closed path.
Parameters
----------
bezier : (N, 2) array-like
Control points of the Bézier segment. See `.BezierSegment`.
inside_closedpath : callable
A function returning True if a given point (x, y) is inside the
closed path. See also `.find_bezier_t_intersecting_with_closedpath`.
tolerance : float
The tolerance for the intersection. See also
`.find_bezier_t_intersecting_with_closedpath`.
Returns
-------
left, right
Lists of control points for the two Bézier segments.
"""
bz = BezierSegment(bezier)
t0, t1 = find_bezier_t_intersecting_with_closedpath(
lambda t: tuple(bz(t)), inside_closedpath, tolerance=tolerance)
_left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)
return _left, _right
# matplotlib specific
def split_path_inout(path, inside, tolerance=0.01, reorder_inout=False):
"""
Divide a path into two segments at the point where ``inside(x, y)`` becomes
False.
"""
from .path import Path
path_iter = path.iter_segments()
ctl_points, command = next(path_iter)
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
ctl_points_old = ctl_points
iold = 0
i = 1
for ctl_points, command in path_iter:
iold = i
i += len(ctl_points) // 2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = np.concatenate([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
else:
raise ValueError("The path does not intersect with the patch")
bp = bezier_path.reshape((-1, 2))
left, right = split_bezier_intersecting_with_closedpath(
bp, inside, tolerance)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise AssertionError("This should never be reached")
verts_left = left[1:]
verts_right = right[:]
if path.codes is None:
path_in = Path(np.concatenate([path.vertices[:i], verts_left]))
path_out = Path(np.concatenate([verts_right, path.vertices[i:]]))
else:
path_in = Path(np.concatenate([path.vertices[:iold], verts_left]),
np.concatenate([path.codes[:iold], codes_left]))
path_out = Path(np.concatenate([verts_right, path.vertices[i:]]),
np.concatenate([codes_right, path.codes[i:]]))
if reorder_inout and not begin_inside:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
"""
Return a function that checks whether a point is in a circle with center
(*cx*, *cy*) and radius *r*.
The returned function has the signature::
f(xy: tuple[float, float]) -> bool
"""
r2 = r ** 2
def _f(xy):
x, y = xy
return (x - cx) ** 2 + (y - cy) ** 2 < r2
return _f
# quadratic Bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1 - x0, y1 - y0
d = (dx * dx + dy * dy) ** .5
# Account for divide by zero
if d == 0:
return 0.0, 0.0
return dx / d, dy / d
def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1.e-5):
"""
Check if two lines are parallel.
Parameters
----------
dx1, dy1, dx2, dy2 : float
The gradients *dy*/*dx* of the two lines.
tolerance : float
The angular tolerance in radians up to which the lines are considered
parallel.
Returns
-------
is_parallel
- 1 if two lines are parallel in same direction.
- -1 if two lines are parallel in opposite direction.
- False otherwise.
"""
theta1 = np.arctan2(dx1, dy1)
theta2 = np.arctan2(dx2, dy2)
dtheta = abs(theta1 - theta2)
if dtheta < tolerance:
return 1
elif abs(dtheta - np.pi) < tolerance:
return -1
else:
return False
def get_parallels(bezier2, width):
"""
Given the quadratic Bézier control points *bezier2*, returns
control points of quadratic Bézier lines roughly parallel to given
one separated by *width*.
"""
# The parallel Bezier lines are constructed by following ways.
# c1 and c2 are control points representing the start and end of the
# Bezier line.
# cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
cmx - c2x, cmy - c2y)
if parallel_test == -1:
_api.warn_external(
"Lines do not intersect. A straight line is used instead.")
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
cos_t2, sin_t2 = cos_t1, sin_t1
else:
# t1 and t2 is the angle between c1 and cm, cm, c2. They are
# also an angle of the tangential line of the path at c1 and c2
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
# through c1 and perpendicular to the tangential lines of the
# Bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = (
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
)
c2x_left, c2y_left, c2x_right, c2y_right = (
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
)
# find cm_left which is the intersecting point of a line through
# c1_left with angle t1 and a line through c2_left with angle
# t2. Same with cm_right.
try:
cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,
sin_t1, c2x_left, c2y_left,
cos_t2, sin_t2)
cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,
sin_t1, c2x_right, c2y_right,
cos_t2, sin_t2)
except ValueError:
# Special case straight lines, i.e., angle between two lines is
# less than the threshold used by get_intersection (we don't use
# check_if_parallel as the threshold is not the same).
cmx_left, cmy_left = (
0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
)
cmx_right, cmy_right = (
0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
)
# the parallel Bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left),
(cmx_left, cmy_left),
(c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right),
(cmx_right, cmy_right),
(c2x_right, c2y_right)]
return path_left, path_right
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
"""
Find control points of the Bézier curve passing through (*c1x*, *c1y*),
(*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.
"""
cmx = .5 * (4 * mmx - (c1x + c2x))
cmy = .5 * (4 * mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
Being similar to `get_parallels`, returns control points of two quadratic
Bézier lines having a width roughly parallel to given one separated by
*width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
# t1 and t2 is the angle between c1 and cm, cm, c3.
# They are also an angle of the tangential line of the path at c1 and c3
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
# through c1 and perpendicular to the tangential lines of the
# Bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = (
get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
)
c3x_left, c3y_left, c3x_right, c3y_right = (
get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
# c12-c23
c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = (
get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
| BezierSegment |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 1528,
"end": 1711
} | class ____(TypedDict, total=False):
healthData: HealthData | None
dateReleased: datetime | None
dateCreated: datetime | None
dateStarted: datetime | None
| ProjectOptional |
python | python-visualization__folium | folium/plugins/beautify_icon.py | {
"start": 161,
"end": 3383
} | class ____(JSCSSMixin, MacroElement):
"""
Create a BeautifyIcon that can be added to a Marker
Parameters
----------
icon: string, default None
the Font-Awesome icon name to use to render the marker.
icon_shape: string, default None
the icon shape
border_width: integer, default 3
the border width of the icon
border_color: string with hexadecimal RGB, default '#000'
the border color of the icon
text_color: string with hexadecimal RGB, default '#000'
the text color of the icon
background_color: string with hexadecimal RGB, default '#FFF'
the background color of the icon
inner_icon_style: string with css styles for the icon, default ''
the css styles of the icon
spin: boolean, default False
allow the icon to be spinning.
number: integer, default None
the number of the icon.
Examples
--------
Plugin Website: https://github.com/masajid390/BeautifyMarker
>>> BeautifyIcon(
... text_color="#000", border_color="transparent", background_color="#FFF"
... ).add_to(marker)
>>> number_icon = BeautifyIcon(
... text_color="#000",
... border_color="transparent",
... background_color="#FFF",
... number=10,
... inner_icon_style="font-size:12px;padding-top:-5px;",
... )
>>> Marker(
... location=[45.5, -122.3],
... popup=folium.Popup("Portland, OR"),
... icon=number_icon,
... )
>>> BeautifyIcon(icon="arrow-down", icon_shape="marker").add_to(marker)
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = new L.BeautifyIcon.icon(
{{ this.options|tojavascript }}
)
{{ this._parent.get_name() }}.setIcon({{ this.get_name() }});
{% endmacro %}
"""
)
ICON_SHAPE_TYPES = [
"circle",
"circle-dot",
"doughnut",
"rectangle-dot",
"marker",
None,
]
default_js = [
(
"beautify_icon_js",
"https://cdn.jsdelivr.net/gh/marslan390/BeautifyMarker/leaflet-beautify-marker-icon.min.js",
)
]
default_css = [
(
"beautify_icon_css",
"https://cdn.jsdelivr.net/gh/marslan390/BeautifyMarker/leaflet-beautify-marker-icon.min.css",
)
]
def __init__(
self,
icon=None,
icon_shape=None,
border_width=3,
border_color="#000",
text_color="#000",
background_color="#FFF",
inner_icon_style="",
spin=False,
number=None,
**kwargs
):
super().__init__()
self._name = "BeautifyIcon"
self.options = remove_empty(
icon=icon,
icon_shape=icon_shape,
border_width=border_width,
border_color=border_color,
text_color=text_color,
background_color=background_color,
inner_icon_style=inner_icon_style,
spin=spin,
isAlphaNumericIcon=number is not None,
text=number,
**kwargs
)
| BeautifyIcon |
python | apache__airflow | airflow-ctl/src/airflowctl/api/operations.py | {
"start": 2434,
"end": 4029
} | class ____(httpx.HTTPStatusError):
"""Server response error (Generic)."""
@classmethod
def from_response(cls, response: httpx.Response) -> ServerResponseError | None:
if response.status_code < 400:
return None
if response.headers.get("content-type") != "application/json":
return None
if 400 <= response.status_code < 500:
response.read()
return cls(
message=f"Client error message: {response.json()}",
request=response.request,
response=response,
)
msg = response.json()
self = cls(message=msg, request=response.request, response=response)
return self
def _check_flag_and_exit_if_server_response_error(func):
"""Return decorator to check for ServerResponseError and exit if the server is not running."""
def _exit_if_server_response_error(response: Any | ServerResponseError):
if isinstance(response, ServerResponseError):
raise response
return response
def wrapped(self, *args, **kwargs):
try:
if self.exit_in_error:
return _exit_if_server_response_error(response=func(self, *args, **kwargs))
return func(self, *args, **kwargs)
except httpx.ConnectError as e:
if "Connection refused" in str(e):
raise AirflowCtlConnectionException("Connection refused. Is the API server running?")
raise AirflowCtlConnectionException(f"Connection error: {e}")
return wrapped
| ServerResponseError |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 501448,
"end": 504423
} | class ____(Request):
"""
Mark a task status as in_progress. Optionally allows to set the task's execution progress.
:param force: If not true, call fails if the task status is not 'not_started'
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "started"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'not_started'",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self, task, force=False, status_reason=None, status_message=None, **kwargs
):
super(StartedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| StartedRequest |
python | lxml__lxml | src/lxml/tests/test_etree.py | {
"start": 175309,
"end": 179090
} | class ____(HelperTestCase):
def test_xinclude_text(self):
filename = fileInTestDir('test_broken.xml')
root = etree.XML('''\
<doc xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="%s" parse="text"/>
</doc>
''' % path2url(filename))
old_text = root.text
content = read_file(filename)
old_tail = root[0].tail
self.include( etree.ElementTree(root) )
self.assertEqual(old_text + content + old_tail,
root.text)
def test_xinclude(self):
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'))
self.assertNotEqual(
'a',
tree.getroot()[1].tag)
# process xincludes
self.include( tree )
# check whether we find it replaced with included data
self.assertEqual(
'a',
tree.getroot()[1].tag)
def test_xinclude_resolver(self):
class res(etree.Resolver):
include_text = read_file(fileInTestDir('test.xml'))
called = {}
def resolve(self, url, id, context):
if url.endswith(".dtd"):
self.called["dtd"] = True
return self.resolve_filename(
fileInTestDir('test.dtd'), context)
elif url.endswith("test_xinclude.xml"):
self.called["input"] = True
return None # delegate to default resolver
else:
self.called["include"] = True
return self.resolve_string(self.include_text, context)
res_instance = res()
parser = etree.XMLParser(load_dtd = True)
parser.resolvers.add(res_instance)
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
parser = parser)
self.include(tree)
called = list(res_instance.called.items())
called.sort()
self.assertEqual(
[("dtd", True), ("include", True), ("input", True)],
called)
def test_xinclude_resolver_recursive(self):
data = textwrap.dedent('''
<doc xmlns:xi="http://www.w3.org/2001/XInclude">
<foo/>
<xi:include href="./test.xml" />
</doc>
''')
class Resolver(etree.Resolver):
called = {}
def resolve(self, url, id, context):
if url.endswith("test_xinclude.xml"):
assert not self.called.get("input")
self.called["input"] = True
return None # delegate to default resolver
elif url.endswith('/test5.xml'):
assert not self.called.get("DONE")
self.called["DONE"] = True
return self.resolve_string('<DONE/>', context)
else:
_, filename = url.rsplit('/', 1)
assert not self.called.get(filename)
self.called[filename] = True
next_data = data.replace(
'test.xml', 'test%d.xml' % len(self.called))
return self.resolve_string(next_data, context)
res_instance = Resolver()
parser = etree.XMLParser(load_dtd=True)
parser.resolvers.add(res_instance)
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
parser=parser)
self.include(tree)
called = list(res_instance.called.items())
called.sort()
self.assertEqual(
[("DONE", True), ("input", True), ("test.xml", True),
("test2.xml", True), ("test3.xml", True), ("test4.xml", True)],
called)
| _XIncludeTestCase |
python | getsentry__sentry | tests/sentry/utils/test_safe.py | {
"start": 6251,
"end": 6783
} | class ____(unittest.TestCase):
def test_dict(self) -> None:
d = {"1": None, "3": "4"}
assert safe_urlencode(d) == "1=&3=4"
assert d == {"1": None, "3": "4"}
d = {"1": "2", "3": "4"}
assert safe_urlencode(d) == "1=2&3=4"
def test_pair_sequence(self) -> None:
d = [["1", None], ["3", "4"]]
assert safe_urlencode(d) == "1=&3=4"
assert d == [["1", None], ["3", "4"]]
d = [["1", "2"], ["3", "4"]]
assert safe_urlencode(d) == "1=2&3=4"
| SafeUrlencodeTest |
python | django__django | tests/test_client_regress/tests.py | {
"start": 31218,
"end": 32006
} | class ____(TestDataMixin, TestCase):
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
with self.assertRaises(CustomTestException):
self.client.get("/staff_only/")
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a
# problem.
login = self.client.login(username="staff", password="password")
self.assertTrue(login, "Could not log in")
self.client.get("/staff_only/")
@override_settings(ROOT_URLCONF="test_client_regress.urls")
| ExceptionTests |
python | streamlit__streamlit | lib/tests/streamlit/config_test.py | {
"start": 54291,
"end": 63296
} | class ____(unittest.TestCase):
"""Tests that involve loading the config.toml file."""
def setUp(self):
self.patches = [
patch.object(
config, "_section_descriptions", new=copy.deepcopy(SECTION_DESCRIPTIONS)
),
patch.object(config, "_config_options", new=None),
]
for p in self.patches:
p.start()
def tearDown(self):
for p in self.patches:
p.stop()
def test_missing_config(self):
"""Test that we can initialize our config even if the file is missing."""
with patch("streamlit.config.os.path.exists") as path_exists:
path_exists.return_value = False
config.get_config_options()
assert config.get_option("browser.gatherUsageStats")
assert config.get_option("theme.font") is None
def test_load_global_config(self):
"""Test that ~/.streamlit/config.toml is read."""
global_config = """
[theme]
base = "dark"
font = "sans serif"
"""
global_config_path = "/mock/home/folder/.streamlit/config.toml"
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
# patch streamlit.*.os.* instead of os.* for py35 compat
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path == global_config_path
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
assert config.get_option("theme.font") == "sans serif"
assert config.get_option("theme.textColor") is None
def test_load_local_config(self):
"""Test that $CWD/.streamlit/config.toml is read, even
if ~/.streamlit/config.toml is missing.
"""
local_config = """
[theme]
base = "light"
textColor = "#FFFFFF"
"""
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
open_patch = patch("streamlit.config.open", mock_open(read_data=local_config))
# patch streamlit.*.os.* instead of os.* for py35 compat
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path == local_config_path
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
assert config.get_option("theme.textColor") == "#FFFFFF"
assert config.get_option("theme.font") is None
def test_load_global_local_config(self):
"""Test that $CWD/.streamlit/config.toml gets overlaid on
~/.streamlit/config.toml at parse time.
"""
global_config = """
[theme]
base = "dark"
font = "sans serif"
"""
local_config = """
[theme]
base = "light"
textColor = "#FFFFFF"
"""
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
local_open = mock_open(read_data=local_config)
file_open = mock_open()
file_open.side_effect = [global_open.return_value, local_open.return_value]
open_patch = patch("streamlit.config.open", file_open)
# patch streamlit.*.os.* instead of os.* for py35 compat
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path in [
global_config_path,
local_config_path,
]
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
# theme.base set in both local and global
assert config.get_option("theme.base") == "light"
# theme.font is set in global, and not in local
assert config.get_option("theme.font") == "sans serif"
# theme.textColor is set in local and not in global
assert config.get_option("theme.textColor") == "#FFFFFF"
def test_load_global_local_flag_config(self):
"""Test that CLI flags have higher priority than both
~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.
"""
global_config = """
[theme]
base = "dark"
font = "sans serif"
textColor = "#FFFFFF"
"""
local_config = """
[theme]
base = "light"
font = "serif"
"""
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
local_open = mock_open(read_data=local_config)
file_open = mock_open()
file_open.side_effect = [global_open.return_value, local_open.return_value]
open_patch = patch("streamlit.config.open", file_open)
# patch streamlit.*.os.* instead of os.* for py35 compat
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path in [
global_config_path,
local_config_path,
]
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(options_from_flags={"theme.font": "monospace"})
assert config.get_option("theme.base") == "light"
assert config.get_option("theme.textColor") == "#FFFFFF"
assert config.get_option("theme.font") == "monospace"
def test_upload_file_default_values(self):
assert config.get_option("server.maxUploadSize") == 200
def test_max_message_size_default_values(self):
assert config.get_option("server.maxMessageSize") == 200
def test_config_options_removed_on_reparse(self):
"""Test that config options that are removed in a file are also removed
from our _config_options dict."""
global_config_path = "/mock/home/folder/.streamlit/config.toml"
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path == global_config_path
global_config = """
[theme]
base = "dark"
font = "sans serif"
"""
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
assert config.get_option("theme.base") == "dark"
assert config.get_option("theme.font") == "sans serif"
global_config = """
[theme]
base = "dark"
"""
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(force_reparse=True)
assert config.get_option("theme.base") == "dark"
assert None is config.get_option("theme.font")
@patch("streamlit.logger.get_logger")
def test_config_options_warn_on_server_change(self, get_logger):
"""Test that a warning is logged if a user changes a config file in the
server section."""
global_config_path = "/mock/home/folder/.streamlit/config.toml"
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path == global_config_path
mock_logger = get_logger()
global_config = """
[server]
address = "localhost"
"""
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
global_config = """
[server]
address = "streamlit.io"
"""
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(force_reparse=True)
mock_logger.warning.assert_any_call(
"An update to the [server] config option section was detected."
" To have these changes be reflected, please restart streamlit."
)
| ConfigLoadingTest |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 34764,
"end": 42237
} | class ____(QueryTest):
def test_loader_options(self):
User = self.classes.User
s = fixture_session()
u1 = s.get(User, 8, options=[joinedload(User.addresses)])
eq_(len(u1.__dict__["addresses"]), 3)
def test_get_composite_pk_keyword_based_no_result(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
is_(s.get(CompositePk, {"i": 100, "j": 100}), None)
def test_get_composite_pk_keyword_based_result(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
one_two = s.get(CompositePk, {"i": 1, "j": 2})
eq_(one_two.i, 1)
eq_(one_two.j, 2)
eq_(one_two.k, 3)
def test_get_composite_pk_keyword_based_wrong_keys(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert_raises(
sa_exc.InvalidRequestError, s.get, CompositePk, {"i": 1, "k": 2}
)
def test_get_composite_pk_keyword_based_too_few_keys(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert_raises(sa_exc.InvalidRequestError, s.get, CompositePk, {"i": 1})
def test_get_composite_pk_keyword_based_too_many_keys(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert_raises(
sa_exc.InvalidRequestError,
s.get,
CompositePk,
{"i": 1, "j": "2", "k": 3},
)
def test_get_synonym_direct_name(self, decl_base):
"""test #8753"""
class MyUser(decl_base):
__table__ = self.tables.users
syn_id = synonym("id")
s = fixture_session()
u = s.get(MyUser, {"syn_id": 7})
eq_(u.id, 7)
def test_get_synonym_indirect(self, decl_base):
"""test #8753"""
class MyUser(decl_base):
__table__ = self.tables.users
uid = __table__.c.id
syn_id = synonym("uid")
s = fixture_session()
u = s.get(MyUser, {"syn_id": 7})
eq_(u.uid, 7)
def test_get_composite_pk_no_result(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert s.get(CompositePk, (100, 100)) is None
def test_get_composite_pk_result(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
one_two = s.get(CompositePk, (1, 2))
assert one_two.i == 1
assert one_two.j == 2
assert one_two.k == 3
def test_get_too_few_params(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert_raises_message(
sa_exc.InvalidRequestError,
r"Incorrect number of values in identifier to formulate "
r"primary key for session.get\(\); ",
s.get,
CompositePk,
7,
)
def test_get_too_few_params_tuple(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert_raises_message(
sa_exc.InvalidRequestError,
r"Incorrect number of values in identifier to formulate "
r"primary key for session.get\(\); ",
s.get,
CompositePk,
(7,),
)
def test_get_too_many_params(self):
CompositePk = self.classes.CompositePk
s = fixture_session()
assert_raises_message(
sa_exc.InvalidRequestError,
r"Incorrect number of values in identifier to formulate "
r"primary key for session.get\(\); ",
s.get,
CompositePk,
(7, 10, 100),
)
def test_get_against_col(self):
User = self.classes.User
s = fixture_session()
assert_raises_message(
sa_exc.ArgumentError,
r"Expected mapped class or mapper, got: .*Instrumented",
s.get,
User.id,
(5,),
)
@testing.fixture
def outerjoin_mapping(self, registry):
users, addresses = self.tables.users, self.tables.addresses
s = users.outerjoin(addresses)
class UserThing(ComparableEntity):
pass
registry.map_imperatively(
UserThing,
s,
properties={
"id": (users.c.id, addresses.c.user_id),
"address_id": addresses.c.id,
},
)
return UserThing
def test_get_null_pk(self, outerjoin_mapping):
"""test that a mapping which can have None in a
PK (i.e. map to an outerjoin) works with get()."""
UserThing = outerjoin_mapping
sess = fixture_session()
u10 = sess.get(UserThing, (10, None))
eq_(u10, UserThing(id=10))
def test_get_fully_null_pk(self):
User = self.classes.User
s = fixture_session()
assert_warns_message(
sa_exc.SAWarning,
r"fully NULL primary key identity cannot load any object. "
"This condition may raise an error in a future release.",
s.get,
User,
None,
)
def test_get_fully_null_composite_pk(self, outerjoin_mapping):
UserThing = outerjoin_mapping
s = fixture_session()
assert_warns_message(
sa_exc.SAWarning,
r"fully NULL primary key identity cannot load any object. "
"This condition may raise an error in a future release.",
s.get,
UserThing,
(None, None),
)
def test_unique_param_names(self):
users = self.tables.users
class SomeUser:
pass
s = users.select().where(users.c.id != 12).alias("users")
m = self.mapper_registry.map_imperatively(SomeUser, s)
assert s.primary_key == m.primary_key
sess = fixture_session()
assert sess.get(SomeUser, 7).name == "jack"
def test_load(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session(autoflush=False)
assert s.get(User, 19, populate_existing=True) is None
u = s.get(User, 7, populate_existing=True)
u2 = s.get(User, 7, populate_existing=True)
assert u is u2
s.expunge_all()
u2 = s.get(User, 7, populate_existing=True)
assert u is not u2
u2.name = "some name"
a = Address(email_address="some other name")
u2.addresses.append(a)
assert u2 in s.dirty
assert a in u2.addresses
s.get(User, 7, populate_existing=True)
assert u2 not in s.dirty
assert u2.name == "jack"
assert a not in u2.addresses
@testing.requires.unicode_connections
def test_unicode(self, metadata, connection):
table = Table(
"unicode_data",
metadata,
Column("id", Unicode(40), primary_key=True),
Column("data", Unicode(40)),
)
metadata.create_all(connection)
ustring = util.b("petit voix m\xe2\x80\x99a").decode("utf-8")
connection.execute(table.insert(), dict(id=ustring, data=ustring))
class LocalFoo(self.classes.Base):
pass
self.mapper_registry.map_imperatively(LocalFoo, table)
with Session(connection) as sess:
eq_(
sess.get(LocalFoo, ustring),
LocalFoo(id=ustring, data=ustring),
)
| GetTest |
python | doocs__leetcode | solution/2900-2999/2980.Check if Bitwise OR Has Trailing Zeros/Solution.py | {
"start": 0,
"end": 122
} | class ____:
def hasTrailingZeros(self, nums: List[int]) -> bool:
return sum(x & 1 ^ 1 for x in nums) >= 2
| Solution |
python | nedbat__coveragepy | tests/test_data.py | {
"start": 23505,
"end": 26496
} | class ____(CoverageTest):
"""Tests of CoverageData that need a temporary directory to make files."""
@pytest.mark.parametrize("file_class", FilePathClasses)
def test_read_write_lines(self, file_class: FilePathType) -> None:
self.assert_doesnt_exist("lines.dat")
covdata1 = DebugCoverageData(file_class("lines.dat"))
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_exists("lines.dat")
covdata2 = DebugCoverageData("lines.dat")
covdata2.read()
assert_lines1_data(covdata2)
def test_read_write_arcs(self) -> None:
covdata1 = DebugCoverageData("arcs.dat")
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = DebugCoverageData("arcs.dat")
covdata2.read()
assert_arcs3_data(covdata2)
def test_read_errors(self) -> None:
self.make_file("xyzzy.dat", "xyzzy")
with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]xyzzy.dat': \S+"):
covdata = DebugCoverageData("xyzzy.dat")
covdata.read()
assert not covdata
def test_hard_read_error(self) -> None:
self.make_file("noperms.dat", "go away")
os.chmod("noperms.dat", 0)
with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]noperms.dat': \S+"):
covdata = DebugCoverageData("noperms.dat")
covdata.read()
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_error_when_closing(self, klass: TCoverageData) -> None:
msg = r"Couldn't .* '.*[/\\]flaked.dat': \S+"
with pytest.raises(DataError, match=msg):
covdata = klass("flaked.dat")
covdata.add_lines(LINES_1)
# I don't know how to make a real error, so let's fake one.
sqldb = list(covdata._dbs.values())[0]
sqldb.close = lambda: 1 / 0 # type: ignore
covdata.add_lines(LINES_1)
def test_wrong_schema_version(self) -> None:
with sqlite3.connect("wrong_schema.db") as con:
con.execute("create table coverage_schema (version integer)")
con.execute("insert into coverage_schema (version) values (99)")
msg = r"Couldn't .* '.*[/\\]wrong_schema.db': wrong schema: 99 instead of \d+"
with pytest.raises(DataError, match=msg):
covdata = DebugCoverageData("wrong_schema.db")
covdata.read()
assert not covdata
def test_wrong_schema_schema(self) -> None:
with sqlite3.connect("wrong_schema_schema.db") as con:
con.execute("create table coverage_schema (xyzzy integer)")
con.execute("insert into coverage_schema (xyzzy) values (99)")
msg = r"Data file .* doesn't seem to be a coverage data file: .* no such column"
with pytest.raises(DataError, match=msg):
covdata = DebugCoverageData("wrong_schema_schema.db")
covdata.read()
assert not covdata
| CoverageDataInTempDirTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/openstreetmap/views.py | {
"start": 186,
"end": 388
} | class ____(OAuth):
url = "https://api.openstreetmap.org/api/0.6/user/details.json"
def get_user_info(self):
data = self.query(self.url).json()
return data["user"]
| OpenStreetMapAPI |
python | google__jax | jax/_src/linear_util.py | {
"start": 4582,
"end": 9972
} | class ____:
"""Represents a function `f` to which `transforms` are to be applied.
Args:
f: the function to be transformed.
f_transformed: transformed function.
transforms: a tuple of `(gen, gen_static_args)` tuples representing
transformations to apply to `f.` Here `gen` is a generator function and
`gen_static_args` is a tuple of static arguments for the generator. See
description at the start of this module for the expected behavior of the
generator.
stores: a list of out_store for the auxiliary output of the `transforms`.
params: a tuple of `(name, param)` tuples representing extra parameters to
pass as keyword arguments to `f`, along with the transformed keyword
arguments.
in_type: optional input type
debug_info: debugging info about the function being wrapped.
"""
__slots__ = ("f", "f_transformed", "transforms", "stores", "params", "in_type", "debug_info")
f: Callable
f_transformed: Callable
transforms: tuple[tuple[Callable, tuple[Hashable, ...]], ...]
stores: tuple[Store | EqualStore | None, ...]
params: tuple[tuple[str, Any], ...]
in_type: core.InputType | None
debug_info: DebugInfo
def __init__(self, f: Callable,
f_transformed: Callable,
transforms: tuple[tuple[Callable, tuple[Hashable, ...]], ...],
stores: tuple[Store | EqualStore | None, ...],
params: tuple[tuple[str, Hashable], ...],
in_type: core.InputType | None,
debug_info: DebugInfo):
self.f = f
self.f_transformed = f_transformed
self.transforms = transforms
self.stores = stores
self.params = params
self.in_type = in_type
self.debug_info = debug_info
@property
def __name__(self):
return fun_name(self.f, "<unnamed wrapped function>")
def wrap(self, gen, gen_static_args,
out_store: Store | EqualStore | None) -> WrappedFun:
"""Add another transform and its store."""
if out_store is None:
return WrappedFun(self.f, partial(gen, self.f_transformed, *gen_static_args),
((gen, gen_static_args),) + self.transforms,
(out_store,) + self.stores, self.params, None, self.debug_info)
else:
return WrappedFun(self.f, partial(gen, self.f_transformed, out_store, *gen_static_args),
((gen, gen_static_args),) + self.transforms,
(out_store,) + self.stores, self.params, None, self.debug_info)
def populate_stores(self, stores):
"""Copy the values from the `stores` into `self.stores`."""
for self_store, other_store in zip(self.stores, stores):
if self_store is not None:
self_store.store(other_store.val)
def call_wrapped(self, *args, **kwargs):
"""Calls the transformed function"""
return self.f_transformed(*args, **kwargs)
def __repr__(self):
def transform_to_str(x):
i, (gen, args) = x
return f"{i} : {fun_name(gen)} {fun_name(args)}"
transformation_stack = map(transform_to_str, enumerate(self.transforms))
return "Wrapped function:\n" + '\n'.join(transformation_stack) + '\nCore: ' + fun_name(self.f) + '\n'
def __hash__(self):
return hash((self.f, self.transforms, self.params, self.in_type,
self.debug_info))
def __eq__(self, other):
return (self.f == other.f and self.transforms == other.transforms and
self.params == other.params and self.in_type == other.in_type and
self.debug_info == other.debug_info)
def replace_debug_info(self, dbg: core.DebugInfo) -> WrappedFun:
return WrappedFun(self.f, self.f_transformed, self.transforms,
self.stores, self.params, self.in_type,
dbg)
def with_unknown_names(self) -> WrappedFun:
return self.replace_debug_info(self.debug_info.with_unknown_names())
@curry
def transformation2(gen, fun: WrappedFun, *gen_static_args) -> WrappedFun:
"""Adds one more transformation to a WrappedFun.
Args:
gen: the transformation generator function
fun: a WrappedFun on which to apply the transformation
gen_static_args: static args for the generator function
"""
return fun.wrap(gen, gen_static_args, None)
# Backwards compat only. TODO: deprecate
@curry
def transformation(gen, fun: WrappedFun, *gen_static_args) -> WrappedFun:
def gen2(f, *args, **kwargs):
gen_inst = gen(*args, **kwargs)
args_, kwargs_ = next(gen_inst)
return gen_inst.send(f(*args_, **kwargs_))
return transformation2(gen2, fun, *gen_static_args)()
# Backwards compat only. TODO: deprecate
@curry
def transformation_with_aux(gen, fun: WrappedFun, *gen_static_args) -> WrappedFun:
def gen2(f, store, *args, **kwargs):
gen_inst = gen(*args, **kwargs)
args_, kwargs_ = next(gen_inst)
ans, aux = gen_inst.send(f(*args_, **kwargs_))
store.store(aux)
return ans
return transformation_with_aux2(gen2, fun, *gen_static_args)()
@curry
def transformation_with_aux2(
gen, fun: WrappedFun, *gen_static_args, use_eq_store: bool = False
) -> tuple[WrappedFun, Callable[[], Any]]:
"""Adds one more transformation with auxiliary output to a WrappedFun."""
out_store = Store() if not use_eq_store else EqualStore()
out_thunk = lambda: out_store.val
return fun.wrap(gen, gen_static_args, out_store), out_thunk
| WrappedFun |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 759,
"end": 1949
} | class ____:
"""Use this test mixin for simple license plate validation"""
def perform_extra_checks(self, license_plate, match):
pass
def test_license_plate(self, faker, num_samples):
for _ in range(num_samples):
license_plate = faker.license_plate()
match = self.license_plate_pattern.fullmatch(license_plate)
assert match is not None
self.perform_extra_checks(license_plate, match)
def test_vin(self, faker, num_samples):
for _ in range(num_samples):
vin_number = faker.vin()
# length check: 17
assert len(vin_number) == 17
# verify checksum: vin_number[8]
front_part_weight = calculate_vin_str_weight(vin_number[:8], [8, 7, 6, 5, 4, 3, 2, 10])
rear_part_weight = calculate_vin_str_weight(vin_number[9:], [9, 8, 7, 6, 5, 4, 3, 2])
checksum = (front_part_weight + rear_part_weight) % 11
checksum_str = "X" if checksum == 10 else str(checksum)
assert vin_number[8] == checksum_str
for char in vin_number[13:]:
assert char in string.digits
| _SimpleAutomotiveTestMixin |
python | pallets__werkzeug | tests/test_datastructures.py | {
"start": 36762,
"end": 38200
} | class ____:
@pytest.mark.parametrize(
("values", "matches", "default", "expect"),
[
([("text/*", 1)], ["text/html"], None, "text/html"),
([("text/*", 1)], ["image/png"], "text/plain", "text/plain"),
([("text/*", 1)], ["image/png"], None, None),
(
[("*/*", 1), ("text/html", 1)],
["image/png", "text/html"],
None,
"text/html",
),
(
[("*/*", 1), ("text/html", 1)],
["image/png", "text/plain"],
None,
"image/png",
),
(
[("*/*", 1), ("text/html", 1), ("image/*", 1)],
["image/png", "text/html"],
None,
"text/html",
),
(
[("*/*", 1), ("text/html", 1), ("image/*", 1)],
["text/plain", "image/png"],
None,
"image/png",
),
(
[("text/html", 1), ("text/html; level=1", 1)],
["text/html;level=1"],
None,
"text/html;level=1",
),
],
)
def test_mime_accept(self, values, matches, default, expect):
accept = ds.MIMEAccept(values)
match = accept.best_match(matches, default=default)
assert match == expect
| TestMIMEAccept |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 41799,
"end": 43366
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("zh_CN")
Faker.seed(0)
def test_zh_CN_ssn(self):
for _ in range(100):
ssn = self.fake.ssn()
assert len(ssn) == 18
def test_zh_CN_ssn_invalid_gender_passed(self):
with pytest.raises(ValueError):
self.fake.ssn(gender="X")
with pytest.raises(ValueError):
self.fake.ssn(gender="*")
with pytest.raises(ValueError):
self.fake.ssn(gender="22")
def test_zh_CN_ssn_gender_passed(self):
# Females have even number at index 17
ssn = self.fake.ssn(gender="F")
assert int(ssn[16]) % 2 == 0
# Males have odd number at index 17
ssn = self.fake.ssn(gender="M")
assert int(ssn[16]) % 2 == 1
def test_zh_CN_ssn_invalid_area_code_passed(self):
ssn = self.fake.ssn(area_code=12)
assert int(ssn[0:6]) > 0
ssn = self.fake.ssn(area_code={})
assert int(ssn[0:6]) > 0
ssn = self.fake.ssn(area_code=[])
assert int(ssn[0:6]) > 0
ssn = self.fake.ssn(area_code=None)
assert int(ssn[0:6]) > 0
ssn = self.fake.ssn()
assert int(ssn[0:6]) > 0
def test_zh_CN_ssn_area_code_passed(self):
#
ssn = self.fake.ssn(area_code="654225")
assert int(ssn[0:6]) == 654225
ssn = self.fake.ssn(area_code="820000")
assert int(ssn[0:6]) == 820000
ssn = self.fake.ssn(area_code="830000")
assert int(ssn[0:6]) == 830000
| TestZhCN |
python | pallets__jinja | src/jinja2/idtracking.py | {
"start": 5079,
"end": 7214
} | class ____(NodeVisitor):
def __init__(self, symbols: "Symbols") -> None:
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = _simple_visit
visit_Block = _simple_visit
visit_Macro = _simple_visit
visit_FilterBlock = _simple_visit
visit_Scope = _simple_visit
visit_If = _simple_visit
visit_ScopedEvalContextModifier = _simple_visit
def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(
self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
) -> None:
if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == "else":
branch = node.else_
elif for_branch == "test":
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError("Unknown for branch")
if branch:
for item in branch:
self.sym_visitor.visit(item)
def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
| RootVisitor |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 7326,
"end": 7398
} | class ____(TestAtomFeed):
feed_type = MyCustomAtom1Feed
| TestCustomFeed |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/summary_ops/summary_ops_test.py | {
"start": 47535,
"end": 49243
} | class ____(test_util.TensorFlowTestCase):
def testNoopWriter_doesNothing(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_noop_writer()
writer.init()
with writer.as_default():
result = summary_ops.write('test', 1.0, step=0)
writer.flush()
writer.close()
self.assertFalse(result) # Should have found no active writer
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertLen(files, 0)
def testNoopWriter_asNestedContext_isTransparent(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
noop_writer = summary_ops.create_noop_writer()
with writer.as_default():
result1 = summary_ops.write('first', 1.0, step=0)
with noop_writer.as_default():
result2 = summary_ops.write('second', 1.0, step=0)
result3 = summary_ops.write('third', 1.0, step=0)
# All ops should have written, including the one inside the no-op writer,
# since it doesn't actively *disable* writing - it just behaves as if that
# entire `with` block wasn't there at all.
self.assertAllEqual([result1, result2, result3], [True, True, True])
def testNoopWriter_setAsDefault(self):
try:
with context.eager_mode():
writer = summary_ops.create_noop_writer()
writer.set_as_default()
result = summary_ops.write('test', 1.0, step=0)
self.assertFalse(result) # Should have found no active writer
finally:
# Ensure we clean up no matter how the test executes.
summary_ops._summary_state.writer = None # pylint: disable=protected-access
| NoopWriterTest |
python | oauthlib__oauthlib | oauthlib/oauth1/rfc5849/endpoints/signature_only.py | {
"start": 315,
"end": 3327
} | class ____(BaseEndpoint):
"""An endpoint only responsible for verifying an oauth signature."""
def validate_request(self, uri, http_method='GET',
body=None, headers=None):
"""Validate a signed OAuth request.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error as err:
log.info(
'Exception caught while validating request, %s.' % err)
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error as err:
log.info(
'Exception caught while validating request, %s.' % err)
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request):
log.debug('[Failure] verification failed: timestamp/nonce')
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
valid_signature = self._check_signature(request)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid signature: %s", valid_signature)
return v, request
| SignatureOnlyEndpoint |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 5210,
"end": 5373
} | class ____(Message):
"""
Indicates a return statement outside of a function/method.
"""
message = '\'return\' outside function'
| ReturnOutsideFunction |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_missouri_zip.py | {
"start": 1751,
"end": 4094
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Missouri zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_missouri_zip": ["63376", "64001", "64638", "65324"],
"invalid_missouri_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_missouri_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_missouri_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_missouri_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMissouriZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidMissouriZip |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/cli_shared_test.py | {
"start": 13470,
"end": 15831
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
self.var_a = variables.Variable(42.0, name="a")
def tearDown(self):
ops.reset_default_graph()
def testShapeError(self):
tf_error = errors.OpError(None, self.var_a.initializer, "foo description",
None)
error_intro = cli_shared.get_error_intro(tf_error)
self.assertEqual("!!! An error occurred during the run !!!",
error_intro.lines[1])
self.assertEqual([(0, len(error_intro.lines[1]), "blink")],
error_intro.font_attr_segs[1])
self.assertEqual(2, error_intro.lines[4].index("ni -a -d -t a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[4][0][0])
self.assertEqual(22, error_intro.font_attr_segs[4][0][1])
self.assertEqual("ni -a -d -t a/Assign",
error_intro.font_attr_segs[4][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[4][0][2][1])
self.assertEqual(2, error_intro.lines[6].index("li -r a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[6][0][0])
self.assertEqual(16, error_intro.font_attr_segs[6][0][1])
self.assertEqual("li -r a/Assign",
error_intro.font_attr_segs[6][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[6][0][2][1])
self.assertEqual(2, error_intro.lines[8].index("lt"))
self.assertEqual(2, error_intro.font_attr_segs[8][0][0])
self.assertEqual(4, error_intro.font_attr_segs[8][0][1])
self.assertEqual("lt", error_intro.font_attr_segs[8][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[8][0][2][1])
self.assertStartsWith(error_intro.lines[11], "Op name:")
self.assertTrue(error_intro.lines[11].endswith("a/Assign"))
self.assertStartsWith(error_intro.lines[12], "Error type:")
self.assertTrue(error_intro.lines[12].endswith(str(type(tf_error))))
self.assertEqual("Details:", error_intro.lines[14])
self.assertStartsWith(error_intro.lines[15], "foo description")
def testGetErrorIntroForNoOpName(self):
tf_error = errors.OpError(None, None, "Fake OpError", -1)
error_intro = cli_shared.get_error_intro(tf_error)
self.assertIn("Cannot determine the name of the op", error_intro.lines[3])
if __name__ == "__main__":
googletest.main()
| GetErrorIntroTest |
python | apache__thrift | contrib/zeromq/test-server.py | {
"start": 885,
"end": 1591
} | class ____(storage.Storage.Iface):
def __init__(self):
self.value = 0
def incr(self, amount):
self.value += amount
def get(self):
return self.value
def main():
handler = StorageHandler()
processor = storage.Storage.Processor(handler)
ctx = zmq.Context()
reqrep_server = TZmqServer.TZmqServer(processor, ctx, "tcp://0.0.0.0:9090", zmq.REP)
oneway_server = TZmqServer.TZmqServer(processor, ctx, "tcp://0.0.0.0:9091", zmq.PULL)
multiserver = TZmqServer.TZmqMultiServer()
multiserver.servers.append(reqrep_server)
multiserver.servers.append(oneway_server)
multiserver.serveForever()
if __name__ == "__main__":
main()
| StorageHandler |
python | django-import-export__django-import-export | tests/core/admin.py | {
"start": 647,
"end": 804
} | class ____(ModelResource):
class Meta:
model = Book
fields = ["id", "name"]
name = "Export/Import only book names"
| BookNameResource |
python | kamyu104__LeetCode-Solutions | Python/strong-password-checker-ii.py | {
"start": 38,
"end": 566
} | class ____(object):
def strongPasswordCheckerII(self, password):
"""
:type password: str
:rtype: bool
"""
SPECIAL = set("!@#$%^&*()-+")
return (len(password) >= 8 and
any(c.islower() for c in password) and
any(c.isupper() for c in password) and
any(c.isdigit() for c in password) and
any(c in SPECIAL for c in password) and
all(password[i] != password[i+1] for i in xrange(len(password)-1)))
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_not_to_overlap.py | {
"start": 1845,
"end": 5058
} | class ____(ColumnAggregateExpectation):
"""Expect geometries in this column Not to overlap with each other. If any two geometries do overlap, expectation will return False.
For more information look here \
https://stackoverflow.com/questions/64042379/shapely-is-valid-returns-true-to-invalid-overlap-polygons
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"geometry_not_overlaps": [
Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
Polygon([(2, 2), (4, 2), (4, 4), (2, 4)]),
Point(5, 6),
],
"geometry_overlaps": [
Polygon([(0, 0), (1, 1), (0, 1)]),
Polygon([(10, 0), (10, 5), (0, 0)]),
Polygon([(0, 0), (2, 2), (2, 0)]),
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "geometry_not_overlaps"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "geometry_overlaps"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
metric_dependencies = ("column_values.geometry_not_overlap",)
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
def _validate(
self,
metrics,
runtime_configuration: dict = None,
execution_engine=None,
):
success = metrics.get("column_values.geometry_not_overlap").get("success")
indices = metrics.get("column_values.geometry_not_overlap").get("indices")
return {"success": success, "result": {"overlapping_indices": indices}}
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon-22",
"geospatial",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["rtree", "geopandas", "shapely", "numpy"],
}
if __name__ == "__main__":
ExpectColumnValuesGeometryNotToOverlap().print_diagnostic_checklist()
| ExpectColumnValuesGeometryNotToOverlap |
python | getsentry__sentry | src/sentry/api/endpoints/organization_profiling_profiles.py | {
"start": 1196,
"end": 2472
} | class ____(serializers.Serializer):
# fingerprint is an UInt32
fingerprint = serializers.IntegerField(min_value=0, max_value=(1 << 32) - 1, required=False)
dataSource = serializers.ChoiceField(
["transactions", "profiles", "functions", "spans"], required=False
)
query = serializers.CharField(required=False)
expand = serializers.ListField(child=serializers.ChoiceField(["metrics"]), required=False)
def validate(self, attrs):
source = attrs.get("dataSource")
if source is None:
if attrs.get("fingerprint") is not None:
attrs["dataSource"] = "functions"
else:
attrs["dataSource"] = "transactions"
elif source == "functions":
attrs["dataSource"] = "functions"
elif attrs.get("fingerprint") is not None:
raise ParseError(
detail='"fingerprint" is only permitted when using dataSource: "functions"'
)
elif source == "profiles":
attrs["dataSource"] = "profiles"
elif source == "spans":
attrs["dataSource"] = "spans"
else:
attrs["dataSource"] = "transactions"
return attrs
@region_silo_endpoint
| OrganizationProfilingFlamegraphSerializer |
python | sqlalchemy__sqlalchemy | examples/space_invaders/space_invaders.py | {
"start": 7327,
"end": 7468
} | class ____(Glyph):
"""Describe a glyph for displaying a message."""
__mapper_args__ = {"polymorphic_identity": "message"}
| MessageGlyph |
python | spack__spack | lib/spack/spack/vendor/jinja2/loaders.py | {
"start": 18769,
"end": 20287
} | class ____(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders: t.Sequence[BaseLoader]) -> None:
self.loaders = loaders
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self) -> t.List[str]:
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
| ChoiceLoader |
python | pytorch__pytorch | benchmarks/dynamo/huggingface_llm_models.py | {
"start": 648,
"end": 811
} | class ____:
@staticmethod
def get_model_and_inputs(model_name, device):
raise NotImplementedError("get_model_and_inputs() not implemented")
| Benchmark |
python | sqlalchemy__sqlalchemy | examples/space_invaders/space_invaders.py | {
"start": 7745,
"end": 19709
} | class ____(Glyph):
"""Describe a glyph representing a "splat"."""
__mapper_args__ = {"polymorphic_identity": "splat"}
def glyph_for_state(self, coord, state):
age = state["tick"] - coord.tick
if age > 5:
return self.alt_data
else:
return self.data
def init_glyph(session):
"""Create the glyphs used during play."""
enemy1 = ArmyGlyph(
"enemy1",
"""
#W-#B^#R-#B^#W-
#G| |
""",
"""
#W>#B^#R-#B^#W<
#G^ ^
""",
)
enemy2 = ArmyGlyph(
"enemy2",
"""
#W***
#R<#C~~~#R>
""",
"""
#W@@@
#R<#C---#R>
""",
)
enemy3 = ArmyGlyph(
"enemy3",
"""
#Y((--))
#M-~-~-~
""",
"""
#Y[[--]]
#M~-~-~-
""",
)
saucer = SaucerGlyph(
"saucer",
"""#R~#Y^#R~#G<<((=#WOO#G=))>>""",
"""#Y^#R~#Y^#G<<((=#WOO#G=))>>""",
)
splat1 = SplatGlyph(
"splat1",
"""
#WVVVVV
#W> #R*** #W<
#W^^^^^
""",
"""
#M|
#M- #Y+++ #M-
#M|
""",
)
ship = PlayerGlyph(
"ship",
"""
#Y^
#G=====
""",
)
missile = MissileGlyph(
"missile",
"""
|
""",
)
start = MessageGlyph(
"start_message",
"J = move left; L = move right; SPACE = fire\n"
" #GPress any key to start",
)
lose = MessageGlyph("lose_message", "#YY O U L O S E ! ! !")
win = MessageGlyph("win_message", "#RL E V E L C L E A R E D ! ! !")
paused = MessageGlyph(
"pause_message", "#WP A U S E D\n#GPress P to continue"
)
session.add_all(
[
enemy1,
enemy2,
enemy3,
ship,
saucer,
missile,
start,
lose,
win,
paused,
splat1,
]
)
def setup_curses():
"""Setup terminal/curses state."""
window = curses.initscr()
curses.noecho()
window = curses.newwin(
WINDOW_HEIGHT + (VERT_PADDING * 2),
WINDOW_WIDTH + (HORIZ_PADDING * 2),
WINDOW_TOP - VERT_PADDING,
WINDOW_LEFT - HORIZ_PADDING,
)
curses.start_color()
global _COLOR_PAIRS
_COLOR_PAIRS = {}
for i, (k, v) in enumerate(COLOR_MAP.items(), 1):
curses.init_pair(i, v, curses.COLOR_BLACK)
_COLOR_PAIRS[k] = curses.color_pair(i)
return window
def init_positions(session):
"""Establish a new field of play.
This generates GlyphCoordinate objects
and persists them to the database.
"""
# delete all existing coordinates
session.query(GlyphCoordinate).delete()
session.add(
GlyphCoordinate(
session, "ship", WINDOW_WIDTH // 2 - 2, WINDOW_HEIGHT - 4
)
)
arrangement = (
("enemy3", 50),
("enemy2", 25),
("enemy1", 10),
("enemy2", 25),
("enemy1", 10),
)
for ship_vert, (etype, score) in zip(
range(5, 30, ENEMY_VERT_SPACING), arrangement
):
for ship_horiz in range(0, 50, 10):
session.add(
GlyphCoordinate(
session, etype, ship_horiz, ship_vert, score=score
)
)
def draw(session, window, state):
"""Load all current GlyphCoordinate objects from the
database and render.
"""
for gcoord in session.query(GlyphCoordinate).options(
joinedload(GlyphCoordinate.glyph)
):
gcoord.render(window, state)
window.addstr(1, WINDOW_WIDTH - 5, "Score: %.4d" % state["score"])
window.move(0, 0)
window.refresh()
def check_win(session, state):
"""Return the number of army glyphs remaining -
the player wins if this is zero."""
return (
session.query(func.count(GlyphCoordinate.id))
.join(GlyphCoordinate.glyph.of_type(ArmyGlyph))
.scalar()
)
def check_lose(session, state):
"""Return the number of army glyphs either colliding
with the player or hitting the bottom of the screen.
The player loses if this is non-zero."""
player = state["player"]
return (
session.query(GlyphCoordinate)
.join(GlyphCoordinate.glyph.of_type(ArmyGlyph))
.filter(
GlyphCoordinate.intersects(player) | GlyphCoordinate.bottom_bound
)
.count()
)
def render_message(session, window, msg, x, y):
"""Render a message glyph.
Clears the area beneath the message first
and assumes the display will be paused
afterwards.
"""
# create message box
msg = GlyphCoordinate(session, msg, x, y)
# clear existing glyphs which intersect
for gly in (
session.query(GlyphCoordinate)
.join(GlyphCoordinate.glyph)
.filter(GlyphCoordinate.intersects(msg))
):
gly.blank(window)
# render
msg.render(window, {})
window.refresh()
return msg
def win(session, window, state):
"""Handle the win case."""
render_message(session, window, "win_message", 15, 15)
time.sleep(2)
start(session, window, state, True)
def lose(session, window, state):
"""Handle the lose case."""
render_message(session, window, "lose_message", 15, 15)
time.sleep(2)
start(session, window, state)
def pause(session, window, state):
"""Pause the game."""
msg = render_message(session, window, "pause_message", 15, 15)
prompt(window)
msg.blank(window)
session.delete(msg)
def prompt(window):
"""Display a prompt, quashing any keystrokes
which might have remained."""
window.move(0, 0)
window.nodelay(1)
window.getch()
window.nodelay(0)
window.getch()
window.nodelay(1)
def move_army(session, window, state):
"""Update the army position based on the current
size of the field."""
speed = 30 // 25 * state["num_enemies"]
flip = (state["tick"] % speed) == 0
if not flip:
return
else:
state["flip"] = not state["flip"]
x_slide = 1
# get the lower/upper boundaries of the army
# along the X axis.
min_x, max_x = (
session.query(
func.min(GlyphCoordinate.x),
func.max(GlyphCoordinate.x + GlyphCoordinate.width),
)
.join(GlyphCoordinate.glyph.of_type(ArmyGlyph))
.first()
)
if min_x is None or max_x is None:
# no enemies
return
direction = state["army_direction"]
move_y = False
if direction == 0 and max_x + x_slide >= MAX_X:
direction = state["army_direction"] = 1
move_y = True
elif direction == 1 and min_x - x_slide <= 0:
direction = state["army_direction"] = 0
move_y = True
for enemy_g in session.query(GlyphCoordinate).join(
GlyphCoordinate.glyph.of_type(ArmyGlyph)
):
enemy_g.blank(window)
if move_y:
enemy_g.y += 1
elif direction == 0:
enemy_g.x += x_slide
elif direction == 1:
enemy_g.x -= x_slide
def move_player(session, window, state):
"""Receive player input and adjust state."""
ch = window.getch()
if ch not in (LEFT_KEY, RIGHT_KEY, FIRE_KEY, PAUSE_KEY):
return
elif ch == PAUSE_KEY:
pause(session, window, state)
return
player = state["player"]
if ch == RIGHT_KEY and not player.right_bound:
player.blank(window)
player.x += 1
elif ch == LEFT_KEY and not player.left_bound:
player.blank(window)
player.x -= 1
elif ch == FIRE_KEY and state["missile"] is None:
state["missile"] = GlyphCoordinate(
session, "missile", player.x + 3, player.y - 1
)
def move_missile(session, window, state):
"""Update the status of the current missile, if any."""
if state["missile"] is None or state["tick"] % 2 != 0:
return
missile = state["missile"]
# locate enemy glyphs which intersect with the
# missile's current position; i.e. a hit
glyph = (
session.query(GlyphCoordinate)
.join(GlyphCoordinate.glyph.of_type(EnemyGlyph))
.filter(GlyphCoordinate.intersects(missile))
.first()
)
missile.blank(window)
if glyph or missile.top_bound:
# missile is done
session.delete(missile)
state["missile"] = None
if glyph:
# score!
score(session, window, state, glyph)
else:
# move missile up one character.
missile.y -= 1
def move_saucer(session, window, state):
"""Update the status of the saucer."""
saucer_interval = 500
saucer_speed_interval = 4
if state["saucer"] is None and state["tick"] % saucer_interval != 0:
return
if state["saucer"] is None:
state["saucer"] = saucer = GlyphCoordinate(
session, "saucer", -6, 1, score=random.randrange(100, 600, 100)
)
elif state["tick"] % saucer_speed_interval == 0:
saucer = state["saucer"]
saucer.blank(window)
saucer.x += 1
if saucer.right_edge_bound:
session.delete(saucer)
state["saucer"] = None
def update_splat(session, window, state):
"""Render splat animations."""
for splat in session.query(GlyphCoordinate).join(
GlyphCoordinate.glyph.of_type(SplatGlyph)
):
age = state["tick"] - splat.tick
if age > 10:
splat.blank(window)
session.delete(splat)
else:
splat.render(window, state)
def score(session, window, state, glyph):
"""Process a glyph intersecting with a missile."""
glyph.blank(window)
session.delete(glyph)
if state["saucer"] is glyph:
state["saucer"] = None
state["score"] += glyph.score
# render a splat !
GlyphCoordinate(
session,
"splat1",
glyph.x,
glyph.y,
tick=state["tick"],
label=str(glyph.score),
)
def update_state(session, window, state):
"""Update all state for each game tick."""
num_enemies = state["num_enemies"] = check_win(session, state)
if num_enemies == 0:
win(session, window, state)
elif check_lose(session, state):
lose(session, window, state)
else:
# update the tick counter.
state["tick"] += 1
move_player(session, window, state)
move_missile(session, window, state)
move_army(session, window, state)
move_saucer(session, window, state)
update_splat(session, window, state)
def start(session, window, state, continue_=False):
"""Start a new field of play."""
render_message(session, window, "start_message", 15, 20)
prompt(window)
init_positions(session)
player = (
session.query(GlyphCoordinate)
.join(GlyphCoordinate.glyph.of_type(PlayerGlyph))
.one()
)
state.update(
{
"field_pos": 0,
"alt": False,
"tick": 0,
"missile": None,
"saucer": None,
"player": player,
"army_direction": 0,
"flip": False,
}
)
if not continue_:
state["score"] = 0
window.clear()
window.box()
draw(session, window, state)
def main():
"""Initialize the database and establish the game loop."""
e = create_engine("sqlite://")
Base.metadata.create_all(e)
session = Session(e)
init_glyph(session)
session.commit()
window = setup_curses()
state = {}
start(session, window, state)
while True:
update_state(session, window, state)
draw(session, window, state)
time.sleep(0.01)
if __name__ == "__main__":
main()
| SplatGlyph |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 12113,
"end": 12180
} | class ____(UnaryOp):
pass
@infer_global(operator.pos)
| UnaryNegate |
python | huggingface__transformers | src/transformers/models/data2vec/modular_data2vec_audio.py | {
"start": 7524,
"end": 8848
} | class ____(Data2VecAudioPreTrainedModel, Wav2Vec2ForCTC):
def __init__(self, config):
Data2VecAudioPreTrainedModel.__init__(self, config)
self.data2vec_audio = Data2VecAudioModel(config)
self.dropout = nn.Dropout(config.final_dropout)
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration."
)
output_hidden_size = (
config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
)
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def freeze_base_model(self):
raise AttributeError("Not needed for Data2VecAudio")
def tie_weights(self):
raise AttributeError("Not needed for Data2VecAudio")
def forward(self, **super_kwargs):
return super().forward(**super_kwargs)
| Data2VecAudioForCTC |
python | walkccc__LeetCode | solutions/2167. Minimum Time to Remove All Cars Containing Illegal Goods/2167.py | {
"start": 0,
"end": 738
} | class ____:
def minimumTime(self, s: str) -> int:
n = len(s)
# left[i] := the minimum time to remove the illegal cars of s[0..i]
left = [0] * n
left[0] = int(s[0])
# dp[i] := the minimum time to remove the illegal cars of s[0..i] optimally
# + the time to remove the illegal cars of s[i + 1..n) consecutively
# Note that the way to remove the illegal cars in the right part
# doesn't need to be optimal since:
# `left | illegal cars | n - 1 - k` will be covered in
# `left' | n - 1 - i` later.
dp = [n] * n
dp[0] = left[0] + n - 1
for i in range(1, n):
left[i] = min(left[i - 1] + int(s[i]) * 2, i + 1)
dp[i] = min(dp[i], left[i] + n - 1 - i)
return min(dp)
| Solution |
python | pyparsing__pyparsing | examples/tiny/tiny_ast.py | {
"start": 8055,
"end": 9412
} | class ____(TinyNode):
"""Declaration statement node.
Represents one declaration statement possibly declaring multiple
identifiers with optional initializers, for example:
int x := 1, y, z := 2;
Fields:
- dtype: declared datatype ("int", "float", or "string").
- decls: list of (name, init_expr | None).
"""
statement_type: ClassVar[str] = "decl_stmt"
dtype: str = "int"
# list of (name, init_expr | None)
decls: List[Tuple[str, Optional[object]]] = field(default_factory=list)
@classmethod
def from_parsed(cls, parsed: pp.ParseResults) -> DeclStmtNode:
dtype = parsed.datatype or "int"
items: List[Tuple[str, Optional[object]]] = []
for d in (parsed.decls or []):
if not isinstance(d, pp.ParseResults):
continue
name = d.get("name")
init_expr = d.init if "init" in d else None # type: ignore[attr-defined]
items.append((name, init_expr))
return cls(dtype=dtype, decls=items)
def execute(self, engine: "TinyEngine") -> object | None: # noqa: F821 - forward ref
for name, init_expr in self.decls:
init_val = engine.eval_expr(init_expr) if init_expr is not None else None
engine.declare_var(name, self.dtype, init_val)
return None
@dataclass
| DeclStmtNode |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 920739,
"end": 926069
} | class ____(ValueChannelMixin, core.StringValueDefWithCondition):
"""
UrlValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : str, dict, :class:`ExprRef`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "url"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> UrlValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> UrlValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> UrlValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> UrlValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> UrlValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> UrlValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefstringnullExprRef], /
) -> UrlValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| UrlValue |
python | mwaskom__seaborn | tests/test_base.py | {
"start": 15642,
"end": 19667
} | class ____:
def test_plotter_default_init(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
assert not hasattr(p, "_map_style")
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", style="a"),
)
assert isinstance(p._style_map, StyleMapping)
def test_plotter_customization(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", style="a"),
)
markers = ["s", "p", "h"]
style_order = ["b", "a", "c"]
p.map_style(markers=markers, order=style_order)
assert p._style_map.levels == style_order
assert p._style_map(style_order, "marker") == markers
def test_style_map_null(self, flat_series, null_series):
p = VectorPlotter(variables=dict(x=flat_series, style=null_series))
m = HueMapping(p)
assert m.levels is None
assert m.map_type is None
assert m.lookup_table is None
def test_map_style(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", style="a"),
)
# Test defaults
m = StyleMapping(p, markers=True, dashes=True)
n = len(m.levels)
for key, dashes in zip(m.levels, unique_dashes(n)):
assert m(key, "dashes") == dashes
actual_marker_paths = {
k: mpl.markers.MarkerStyle(m(k, "marker")).get_path()
for k in m.levels
}
expected_marker_paths = {
k: mpl.markers.MarkerStyle(m).get_path()
for k, m in zip(m.levels, unique_markers(n))
}
assert actual_marker_paths == expected_marker_paths
# Test lists
markers, dashes = ["o", "s", "d"], [(1, 0), (1, 1), (2, 1, 3, 1)]
m = StyleMapping(p, markers=markers, dashes=dashes)
for key, mark, dash in zip(m.levels, markers, dashes):
assert m(key, "marker") == mark
assert m(key, "dashes") == dash
# Test dicts
markers = dict(zip(p.plot_data["style"].unique(), markers))
dashes = dict(zip(p.plot_data["style"].unique(), dashes))
m = StyleMapping(p, markers=markers, dashes=dashes)
for key in m.levels:
assert m(key, "marker") == markers[key]
assert m(key, "dashes") == dashes[key]
# Test explicit categories
p = VectorPlotter(data=long_df, variables=dict(x="x", style="a_cat"))
m = StyleMapping(p)
assert m.levels == long_df["a_cat"].cat.categories.to_list()
# Test style order with defaults
order = p.plot_data["style"].unique()[[1, 2, 0]]
m = StyleMapping(p, markers=True, dashes=True, order=order)
n = len(order)
for key, mark, dash in zip(order, unique_markers(n), unique_dashes(n)):
assert m(key, "dashes") == dash
assert m(key, "marker") == mark
obj = mpl.markers.MarkerStyle(mark)
path = obj.get_path().transformed(obj.get_transform())
assert_array_equal(m(key, "path").vertices, path.vertices)
# Test too many levels with style lists
with pytest.warns(UserWarning):
StyleMapping(p, markers=["o", "s"], dashes=False)
with pytest.warns(UserWarning):
StyleMapping(p, markers=False, dashes=[(2, 1)])
# Test missing keys with style dicts
markers, dashes = {"a": "o", "b": "s"}, False
with pytest.raises(ValueError):
StyleMapping(p, markers=markers, dashes=dashes)
markers, dashes = False, {"a": (1, 0), "b": (2, 1)}
with pytest.raises(ValueError):
StyleMapping(p, markers=markers, dashes=dashes)
# Test mixture of filled and unfilled markers
markers, dashes = ["o", "x", "s"], None
with pytest.raises(ValueError):
StyleMapping(p, markers=markers, dashes=dashes)
| TestStyleMapping |
python | google__jax | jax/_src/lax/lax.py | {
"start": 321448,
"end": 343179
} | class ____:
def __init__(self, value_comparator: Callable[[Any, Any], Any]):
self._value_comparator = value_comparator
def __repr__(self):
# Override the repr so that the metadata attached to the lowered op does not
# contain unstable function ids. This plays more nicely with computation
# fingerprint calculation in the compilation cache.
return f'_ArgMinMaxReducer({self._value_comparator.__name__})'
def __call__(self, op_val_index, acc_val_index):
op_val, op_index = op_val_index
acc_val, acc_index = acc_val_index
# Pick op_val if Lt (for argmin) or if NaN
pick_op_val = bitwise_or(self._value_comparator(op_val, acc_val),
ne(op_val, op_val))
# If x and y are not NaN and x = y, then pick the first
pick_op_index = bitwise_or(pick_op_val,
bitwise_and(eq(op_val, acc_val),
lt(op_index, acc_index)))
return (select(pick_op_val, op_val, acc_val),
select(pick_op_index, op_index, acc_index))
def _compute_argminmax(value_comparator, get_identity,
operand, *, index_dtype, axes):
# value_comparator is either lax.lt (for argmin) or lax.gt
# get_identity(operand.dtype) is inf for argmin or -inf for argmax
axis, = axes
indices = broadcasted_iota(
index_dtype, np.shape(operand), axis,
out_sharding=operand.aval.sharding)
res = reduce([operand, indices],
[get_identity(operand.dtype), np.array(0, index_dtype)],
_ArgMinMaxReducer(value_comparator),
axes)
return res[1]
argmin_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmin', weak_type_rule=_strip_weak_type,
sharding_rule=_argminmax_sharding_rule,
vma_rule=partial(core.standard_vma_rule, 'argmin'))
batching.defreducer(argmin_p, _get_min_identity)
ad.defjvp_zero(argmin_p)
argmax_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmax', weak_type_rule=_strip_weak_type,
sharding_rule=_argminmax_sharding_rule,
vma_rule=partial(core.standard_vma_rule, 'argmax'))
batching.defreducer(argmax_p, _get_max_identity)
ad.defjvp_zero(argmax_p)
mlir.register_lowering(
argmin_p,
mlir.lower_fun(
partial(_compute_argminmax, lt, _get_min_identity),
multiple_results=False,
),
inline=False,
)
mlir.register_lowering(
argmax_p,
mlir.lower_fun(
partial(_compute_argminmax, gt, _get_max_identity),
multiple_results=False,
),
inline=False,
)
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != np.bool_ and not np.issubdtype(operand.dtype, np.integer):
raise TypeError(f"logical reduction requires operand dtype bool or int, got {operand.dtype}.")
return tuple(np.delete(operand.shape, axes))
def _reduce_logical_sharding_rule(operand, *, axes):
return operand.sharding.update(spec=tuple_delete(operand.sharding.spec, axes))
def _reduce_or_lin(nzs, x, *, axes):
nz, = nzs
y = reduce_or_p.bind(x, axes=axes)
aval = core.typeof(y).to_tangent_aval()
return y, False, (), lambda _, t: ad_util.Zero(aval)
reduce_or_p = standard_primitive(
_reduce_logical_shape_rule, input_dtype, 'reduce_or',
weak_type_rule=_strip_weak_type, sharding_rule=_reduce_logical_sharding_rule,
vma_rule=partial(core.standard_vma_rule, 'reduce_or'))
batching.defreducer(reduce_or_p, _get_bitwise_or_identity)
ad.primitive_linearizations[reduce_or_p] = _reduce_or_lin
reduce_and_p = standard_primitive(
_reduce_logical_shape_rule, input_dtype, 'reduce_and',
weak_type_rule=_strip_weak_type, sharding_rule=_reduce_logical_sharding_rule,
vma_rule=partial(core.standard_vma_rule, 'reduce_and'))
batching.defreducer(reduce_and_p, _get_bitwise_and_identity)
batching.ragged_prop_rules[reduce_and_p] = batching.ragged_mask_elementwise_rule
reduce_xor_p = standard_primitive(
_reduce_logical_shape_rule, input_dtype, 'reduce_xor',
weak_type_rule=_strip_weak_type, sharding_rule=_reduce_logical_sharding_rule,
vma_rule=partial(core.standard_vma_rule, 'reduce_xor'))
batching.defreducer(reduce_xor_p, _get_bitwise_or_identity)
def _unary_reduce_lower(reducer, unit_factory, ctx, x, *, axes, **kwargs):
aval_out, = ctx.avals_out
dtype = aval_out.dtype
op = hlo.ReduceOp([mlir.aval_to_ir_type(aval_out)], [x],
[mlir.ir_constant(unit_factory(aval_out.dtype))],
mlir.dense_int_array(axes))
scalar_type = mlir.aval_to_ir_type(core.ShapedArray((), dtype))
reducer_region = op.regions[0].blocks.append(scalar_type, scalar_type)
with ir.InsertionPoint(reducer_region):
hlo.return_([reducer(*reducer_region.arguments)])
return [mlir.lower_with_sharding_in_types(ctx, op.result, aval_out)]
mlir.register_lowering(reduce_sum_p, partial(_unary_reduce_lower, hlo.AddOp,
_get_sum_identity))
mlir.register_lowering(reduce_prod_p, partial(_unary_reduce_lower, hlo.MulOp,
_get_prod_identity))
mlir.register_lowering(reduce_or_p, partial(_unary_reduce_lower, hlo.OrOp,
_get_bitwise_or_identity))
mlir.register_lowering(reduce_and_p, partial(_unary_reduce_lower, hlo.AndOp,
_get_bitwise_and_identity))
mlir.register_lowering(reduce_xor_p, partial(_unary_reduce_lower, hlo.XorOp,
_get_bitwise_or_identity))
mlir.register_lowering(reduce_min_p, partial(_unary_reduce_lower, mlir.min_hlo,
_get_min_identity))
mlir.register_lowering(reduce_max_p, partial(_unary_reduce_lower, mlir.max_hlo,
_get_max_identity))
def _reduce_precision_shape_rule(operand, *, exponent_bits, mantissa_bits):
exponent_bits = operator.index(exponent_bits)
mantissa_bits = operator.index(mantissa_bits)
if exponent_bits < 1:
raise ValueError(f"reduce_precision: exponent_bits must be positive; got {exponent_bits}")
if mantissa_bits < 0:
raise ValueError(f"reduce_precision: mantissa_bits must be non-negative; got {mantissa_bits}")
return operand.shape
def _reduce_precision_sharding_rule(operand, *, exponent_bits, mantissa_bits):
return operand.sharding
reduce_precision_p = standard_primitive(
_reduce_precision_shape_rule,
partial(unop_dtype_rule, _identity, _float, 'reduce_precision'),
name='reduce_precision', sharding_rule=_reduce_precision_sharding_rule,
vma_rule=partial(core.standard_vma_rule, 'reduce_precision'))
ad.deflinear(reduce_precision_p, lambda t, **kwargs: [reduce_precision_p.bind(t, **kwargs)])
batching.defvectorized(reduce_precision_p)
def _reduce_precision_lower(ctx, operand, *, exponent_bits, mantissa_bits):
aval_out, = ctx.avals_out
out = hlo.reduce_precision(operand, mlir.i32_attr(exponent_bits),
mlir.i32_attr(mantissa_bits))
return [mlir.lower_with_sharding_in_types(ctx, out, aval_out)]
mlir.register_lowering(reduce_precision_p, _reduce_precision_lower)
_UINT_DTYPES = {
16: np.dtype(np.uint16),
32: np.dtype(np.uint32),
64: np.dtype(np.uint64),
}
_INT_DTYPES = {
16: np.dtype(np.int16),
32: np.dtype(np.int32),
64: np.dtype(np.int64),
}
def _sort_abstract_eval(*args, **kwargs):
args = tuple(args)
if any(arg.shape != args[0].shape for arg in args[1:]):
shapes = " ".join(str(a.shape) for a in args)
raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
return args
def _canonicalize_float_for_sort(x):
# In the sort comparator, we are going to use a comparison operator where -0
# would be before 0, and -NaN and NaN appear at the beginning and end of the
# ordering. In this scheme, -0 would be before 0, and -NaN and NaN appear at
# the beginning and end of the ordering. This causes issues for stable
# sorts, so we avoid this by standardizing the representation of zeros
# and NaNs in the output.
result = select(eq(x, _zero(x)), _zeros(x), x)
with config.debug_nans(False):
result = select(_isnan(x), full_like(result, np.nan), result)
return result
# Default comparator that sorts the operands lexicographically on the
# first `num_keys` arguments.
# For floating point types, a total order is created where
# -infinity < ... < 0 < ... < infinity < NaN.
# 0.0 and -0.0 are treated as equivalent, as are all NaN representations.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
# This code adds complex-number support and lexicographic ordering to the algorithm from:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands, num_keys=1):
x_keys, y_keys = _operands_to_keys(*operands, num_keys=num_keys)
p = None
for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
xk, yk = core.standard_insert_pvary(xk, yk)
p = (bitwise_or(lt_to_p.bind(xk, yk), bitwise_and(eq_to_p.bind(xk, yk), p)) if p is not None
else lt_to_p.bind(xk, yk))
return p
# Similar to sort_lt_comparator, but implements less than or equal. Used by
# the searchsorted() implementation.
def _sort_le_comparator(*operands, num_keys=1):
x_keys, y_keys = _operands_to_keys(*operands, num_keys=num_keys)
p = None
for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
xk, yk = core.standard_insert_pvary(xk, yk)
p = (bitwise_or(lt_to_p.bind(xk, yk), bitwise_and(eq_to_p.bind(xk, yk), p)) if p is not None
else le_to_p.bind(xk, yk))
return p
def _operands_to_keys(*operands, num_keys=1):
assert len(operands) >= 2 and len(operands) % 2 == 0, operands
assert len(operands) // 2 >= num_keys, (operands, num_keys)
x_keys, y_keys = [], []
for x, y in zip(operands[:2*num_keys:2], operands[1:2*num_keys:2]):
assert x.dtype == y.dtype, (x.dtype, y.dtype)
if dtypes.issubdtype(x.dtype, np.complexfloating):
x_keys.extend([_canonicalize_float_for_sort(real(x)), _canonicalize_float_for_sort(imag(x))])
y_keys.extend([_canonicalize_float_for_sort(real(y)), _canonicalize_float_for_sort(imag(y))])
elif dtypes.issubdtype(x.dtype, np.floating):
x_keys.append(_canonicalize_float_for_sort(x))
y_keys.append(_canonicalize_float_for_sort(y))
else:
x_keys.append(x)
y_keys.append(y)
return x_keys, y_keys
def _sort_jvp(primals, tangents, *, dimension, is_stable, num_keys):
shape = primals[0].shape
index_dtype = lax_utils.int_dtype_for_shape(shape, signed=False)
sorted_primals_and_idx = sort_p.bind(
*primals,
broadcasted_iota(index_dtype, shape, dimension),
dimension=dimension, is_stable=is_stable, num_keys=num_keys)
batch_dims = tuple(np.delete(np.arange(len(shape), dtype=np.int64),
dimension))
dnums = slicing.GatherDimensionNumbers(
offset_dims=(),
collapsed_slice_dims=(dimension,),
start_index_map=(dimension,),
operand_batching_dims=batch_dims,
start_indices_batching_dims=batch_dims,
)
idx = expand_dims(sorted_primals_and_idx[-1], (len(shape),))
gather_idx = partial(
slicing.gather,
start_indices=idx, dimension_numbers=dnums, slice_sizes=(1,) * len(shape),
mode=slicing.GatherScatterMode.PROMISE_IN_BOUNDS
)
tangents_out = [t if type(t) is ad_util.Zero else gather_idx(t)
for t in tangents]
return tuple(sorted_primals_and_idx[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension, is_stable, num_keys):
prototype_arg, new_bdim = next(
(a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
new_args = []
for arg, bdim in zip(batched_args, batch_dims):
if bdim is None:
dims = np.delete(np.arange(prototype_arg.ndim), new_bdim)
new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
else:
new_args.append(batching.moveaxis(arg, bdim, new_bdim))
new_dimension = dimension + (new_bdim <= dimension)
bdims = (new_bdim,) * len(new_args)
return (sort_p.bind(*new_args, dimension=new_dimension, is_stable=is_stable, num_keys=num_keys),
bdims)
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(dispatch.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _sort_lower(ctx, *operands, dimension, is_stable, num_keys):
assert all(isinstance(x, core.ShapedArray) for x in ctx.avals_in), ctx.avals_in
sort = hlo.SortOp([mlir.aval_to_ir_type(aval) for aval in ctx.avals_out],
mlir.flatten_ir_values(operands),
dimension=mlir.i64_attr(dimension),
is_stable=ir.BoolAttr.get(is_stable))
scalar_s = lambda a: a.sharding.update(spec=P())
scalar_avals = [aval.update(shape=(), sharding=scalar_s(aval))
for aval in ctx.avals_in]
scalar_types = safe_map(mlir.aval_to_ir_type, scalar_avals)
comparator = sort.comparator.blocks.append(
*util.flatten(zip(scalar_types, scalar_types)))
with ir.InsertionPoint(comparator):
lower_comparator = mlir.lower_fun(partial(_sort_lt_comparator),
multiple_results=False)
sub_ctx = ctx.replace(primitive=None,
avals_in=util.flatten(zip(scalar_avals, scalar_avals)),
avals_out=[core.ShapedArray((), np.bool_)])
out = lower_comparator(sub_ctx, *comparator.arguments, num_keys=num_keys)
hlo.return_(mlir.flatten_ir_values(out))
return sort.results
mlir.register_lowering(sort_p, _sort_lower)
def _top_k_abstract_eval(operand, *, k, axis):
if dtypes.issubdtype(operand.dtype, np.complexfloating):
raise ValueError("top_k is not compatible with complex inputs.")
if k < 0:
raise ValueError(f"k argument to top_k must be nonnegative, got {k}")
if len(operand.shape) == 0:
raise TypeError("top_k operand must have >= 1 dimension, got {}"
.format(operand.shape))
if not (0 <= axis < len(operand.shape)):
raise ValueError(f"axis argument out of range: {axis=} for {operand.shape=}")
shape = list(operand.shape)
if shape[axis] < k:
raise ValueError("k argument to top_k must be no larger than size along axis;"
f" got {k=} with {shape=} and {axis=}")
int32_max = dtypes.iinfo('int32').max
try:
too_large = (shape[axis] > int32_max + 1)
except core.InconclusiveDimensionOperation:
pass
else:
if too_large:
raise ValueError("top_k returns int32 indices, which will overflow for array dimensions "
f"larger than the maximum int32 ({int32_max}). Got {operand.shape=}")
shape[axis] = k
return (operand.update(shape=shape, dtype=operand.dtype,
weak_type=operand.weak_type),
operand.update(shape=shape, dtype=np.dtype(np.int32)))
def _top_k_jvp(primals, tangents, *, k, axis):
operand, = primals
tangent, = tangents
primals_out = top_k(operand, k, axis=axis)
if type(tangent) is ad_util.Zero:
tangent_out = ad_util.Zero.from_primal_value(primals_out[0])
else:
_, k_idxs = primals_out
idx_shape = k_idxs.shape
rank = len(idx_shape)
gather_index_shape = idx_shape + (1,)
gather_indices = reshape(k_idxs, gather_index_shape)
slice_sizes = (1,) * rank
dnums = slicing.GatherDimensionNumbers(
offset_dims=(),
collapsed_slice_dims=(axis,),
operand_batching_dims=tuple(i for i in range(rank) if i != axis),
start_indices_batching_dims=tuple(i for i in range(rank) if i != axis),
start_index_map=(axis,),
)
tangent_out = slicing.gather(tangent, gather_indices, dnums, slice_sizes)
return primals_out, (tangent_out, ad_util.Zero.from_primal_value(primals_out[1]))
def _top_k_batch_rule(batched_args, batch_dims, *, k, axis):
operand, = batched_args
bdim, = batch_dims
if bdim <= axis:
axis += 1
return top_k(operand, k=k, axis=axis), (bdim, bdim)
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(dispatch.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
def _top_k_lower(ctx, operand, k, axis):
# Move axis to last dimension:
ndim = len(ctx.avals_in[0].shape)
if axis != ndim - 1:
perm = list(range(ndim))
perm[axis], perm[-1] = perm[-1], perm[axis]
operand = hlo.transpose(operand, mlir.dense_int_array(perm))
else:
perm = None
# Compute the top-k along the last dimension
if core.is_constant_dim(k):
results = chlo.TopKOp(operand, mlir.i64_attr(k)).results
else:
k_value, = mlir.eval_dynamic_shape_as_vals(ctx, (k,))
out_values_aval, out_indices_aval, = ctx.avals_out
results = mlir.custom_call(
"stablehlo.dynamic_top_k",
result_types=[mlir.aval_to_ir_type(out_values_aval),
mlir.aval_to_ir_type(out_indices_aval)],
operands=[operand, k_value]).results
# Move last dimension back into place
if perm is not None:
results = [hlo.transpose(result, mlir.dense_int_array(perm))
for result in results]
return results
mlir.register_lowering(top_k_p, _top_k_lower)
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _stop_gradient_jvp_rule(primals, tangents):
# if we don't call stop_gradient here, we'd only peel off one autodiff tracer
x, = primals
return stop_gradient(x), ad_util.Zero.from_primal_value(x)
def _stop_gradient_batch_rule(batched_args, batch_dims):
x, = batched_args
dim, = batch_dims
return stop_gradient(x), dim
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
pe.def_trivial_padding(ad_util.stop_gradient_p)
def create_token(_=None):
"""Creates an XLA token value with no preconditions for sequencing effects.
Experimental.
The argument is ignored. It exists for backward compatibility.
"""
return create_token_p.bind()
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(dispatch.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda *_: abstract_token)
def _create_token_lowering(ctx, *operands):
aval_out, = ctx.avals_out
return [hlo.create_token()]
mlir.register_lowering(create_token_p, _create_token_lowering)
def after_all(*operands):
"""Merges one or more XLA token values. Experimental.
Wraps the XLA after all operator."""
operands = core.standard_insert_pvary(*operands)
return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
if any(x is not abstract_token for x in operands):
raise TypeError("Arguments to after_all must be tokens")
return abstract_token
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(dispatch.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
def _after_all_lowering(ctx, *operands):
aval_out, = ctx.avals_out
return [hlo.after_all(operands)]
mlir.register_lowering(after_all_p, _after_all_lowering)
def rng_uniform(a, b, shape):
"""Stateful PRNG generator. Experimental and its use is discouraged.
Returns uniformly distributed random numbers in the range [a, b). If
b <= a, then the result is undefined, and different implementations may
return different results.
You should use jax.random for most purposes; this function exists only for
niche use cases with special performance requirements.
This API may be removed at any time.
"""
a, b = core.standard_insert_pvary(a, b)
return rng_uniform_p.bind(a, b, shape=tuple(shape))
def _rng_uniform_abstract_eval(a, b, *, shape):
if a.dtype != b.dtype:
raise ValueError(
"Arguments to rng_uniform must have identical dtypes, got {} "
"and {}.".format(a.dtype, b.dtype))
if a.shape != () or b.shape != ():
raise ValueError(
"Arguments to rng_uniform must be scalars; got shapes {} and {}."
.format(a.shape, b.shape))
return a.update(shape=shape, dtype=a.dtype,
weak_type=(a.weak_type and b.weak_type))
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(dispatch.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
def _rng_uniform_lowering(ctx, a, b, *, shape):
aval_out, = ctx.avals_out
shape = mlir.ir_constant(np.array(aval_out.shape, np.int64))
return [hlo.rng(a, b, shape, hlo.RngDistributionAttr.get('UNIFORM'))]
mlir.register_lowering(rng_uniform_p, _rng_uniform_lowering)
def _rng_bit_generator_shape_rule(key, *, shape, dtype, algorithm, out_sharding):
del dtype, algorithm
return (key.shape, tuple(shape))
def _rng_bit_generator_sharding_rule(key, *, shape, dtype, algorithm,
out_sharding):
return (key.sharding, out_sharding)
def _rng_bit_generator_vma_rule(key, *, shape, dtype, algorithm, out_sharding):
return (key.vma, frozenset())
def _rng_bit_generator_dtype_rule(key, *, shape, dtype, algorithm, out_sharding):
del shape, algorithm
return (key.dtype, dtype)
def _rng_bit_generator_weak_type_rule(key, *, shape, dtype, algorithm,
out_sharding):
del shape, dtype, algorithm
return (key.weak_type, False)
| _ArgMinMaxReducer |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 11962,
"end": 14947
} | class ____:
"""Make sure all options which have an attribute interface work as they should.
Also make sure option settings are copied correctly when a table is cloned by
slicing."""
def test_set_for_all_columns(self, city_data: PrettyTable) -> None:
city_data.field_names = sorted(city_data.field_names)
city_data.align = "l"
city_data.max_width = 10
city_data.start = 2
city_data.end = 4
city_data.sortby = "Area"
city_data.reversesort = True
city_data.header = True
city_data.border = False
city_data.hrules = HRuleStyle.ALL
city_data.int_format = "4"
city_data.float_format = "2.2"
city_data.padding_width = 2
city_data.left_padding_width = 2
city_data.right_padding_width = 2
city_data.vertical_char = "!"
city_data.horizontal_char = "~"
city_data.junction_char = "*"
city_data.top_junction_char = "@"
city_data.bottom_junction_char = "#"
city_data.right_junction_char = "$"
city_data.left_junction_char = "%"
city_data.top_right_junction_char = "^"
city_data.top_left_junction_char = "&"
city_data.bottom_right_junction_char = "("
city_data.bottom_left_junction_char = ")"
city_data.format = True
city_data.attributes = {"class": "prettytable"}
assert city_data.get_string() == city_data[:].get_string()
def test_set_for_one_column(self, city_data: PrettyTable) -> None:
city_data.align["Rainfall"] = "l"
city_data.max_width["Name"] = 10
city_data.int_format["Population"] = "4"
city_data.float_format["Area"] = "2.2"
assert city_data.get_string() == city_data[:].get_string()
def test_preserve_internal_border(self) -> None:
table = PrettyTable(preserve_internal_border=True)
assert table.preserve_internal_border is True
def test_internal_border_preserved(self, helper_table: PrettyTable) -> None:
helper_table.border = False
helper_table.preserve_internal_border = True
assert (
helper_table.get_string().strip()
== """
| Field 1 | Field 2 | Field 3
---+---------+---------+---------
1 | value 1 | value2 | value3
4 | value 4 | value5 | value6
7 | value 7 | value8 | value9
""".strip() # noqa: W291
)
@pytest.fixture(scope="module")
def db_cursor() -> Generator[sqlite3.Cursor]:
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
yield cur
cur.close()
conn.close()
@pytest.fixture(scope="module")
def init_db(db_cursor: sqlite3.Cursor) -> Generator[Any]:
db_cursor.execute(
"CREATE TABLE cities "
"(name TEXT, area INTEGER, population INTEGER, rainfall REAL)"
)
for row in CITY_DATA:
db_cursor.execute(f"INSERT INTO cities VALUES {tuple(row)}")
yield
db_cursor.execute("DROP TABLE cities")
| TestOptionAttribute |
python | pytorch__pytorch | test/dynamo/test_guard_manager.py | {
"start": 32754,
"end": 33839
} | class ____(torch._dynamo.test_case.TestCase):
def test_duplicate_guard(self):
class Foo:
def __init__(self):
self.x = 4
self.bar = 4
foo = Foo()
def fn(x):
if hasattr(foo, "y"):
x = torch.sin(x)
if hasattr(foo, "y"):
x = torch.sin(x)
if hasattr(foo, "bar"):
x = torch.cos(x)
if hasattr(foo, "bar"):
x = torch.cos(x)
return x + foo.x
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
def hook(guard_wrapper, f_locals, builder):
guard_str = str(guard_wrapper)
# One for tensor and one for y
self.assertEqual(guard_str.count("NO_HASATTR"), 2)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
opt_fn(torch.randn(4, 4))
| DuplicateGuardTest |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/manifest_component_transformer.py | {
"start": 7089,
"end": 12539
} | class ____:
def propagate_types_and_parameters(
self,
parent_field_identifier: str,
declarative_component: Mapping[str, Any],
parent_parameters: Mapping[str, Any],
) -> Mapping[str, Any]:
"""
Recursively transforms the specified declarative component and subcomponents to propagate parameters and insert the
default component type if it was not already present. The resulting transformed components are a deep copy of the input
components, not an in-place transformation.
:param declarative_component: The current component that is having type and parameters added
:param parent_field_identifier: The name of the field of the current component coming from the parent component
:param parent_parameters: The parameters set on parent components defined before the current component
:return: A deep copy of the transformed component with types and parameters persisted to it
"""
propagated_component = dict(copy.deepcopy(declarative_component))
if "type" not in propagated_component:
# If the component has class_name we assume that this is a reference to a custom component. This is a slight change to
# existing behavior because we originally allowed for either class or type to be specified. After the pydantic migration,
# class_name will only be a valid field on custom components and this change reflects that. I checked, and we currently
# have no low-code connectors that use class_name except for custom components.
if "class_name" in propagated_component:
found_type = CUSTOM_COMPONENTS_MAPPING.get(parent_field_identifier)
else:
found_type = DEFAULT_MODEL_TYPES.get(parent_field_identifier)
if found_type:
propagated_component["type"] = found_type
# When there is no resolved type, we're not processing a component (likely a regular object) and don't need to propagate parameters
# When the type refers to a json schema, we're not processing a component as well. This check is currently imperfect as there could
# be json_schema are not objects but we believe this is not likely in our case because:
# * records are Mapping so objects hence SchemaLoader root should be an object
# * connection_specification is a Mapping
if "type" not in propagated_component or self._is_json_schema_object(propagated_component):
return propagated_component
component_type = propagated_component.get("type", "")
model_class = COMPONENT_TYPE_REGISTY.get(component_type)
# Grab the list of expected fields for the component type
valid_fields = get_model_fields(model_class)
# Combines parameters defined at the current level with parameters from parent components. Parameters at the current
# level take precedence
current_parameters = dict(copy.deepcopy(parent_parameters))
component_parameters = propagated_component.pop(PARAMETERS_STR, {})
current_parameters = {**current_parameters, **component_parameters}
# Parameters should be applied to the current component fields with the existing field taking precedence over parameters if
# both exist
for parameter_key, parameter_value in current_parameters.items():
if parameter_key in valid_fields:
propagated_component[parameter_key] = propagated_component.get(parameter_key) or parameter_value
for field_name, field_value in propagated_component.items():
if isinstance(field_value, dict):
# We exclude propagating a parameter that matches the current field name because that would result in an infinite cycle
excluded_parameter = current_parameters.pop(field_name, None)
parent_type_field_identifier = f"{propagated_component.get('type')}.{field_name}"
propagated_component[field_name] = self.propagate_types_and_parameters(
parent_type_field_identifier, field_value, current_parameters
)
if excluded_parameter:
current_parameters[field_name] = excluded_parameter
elif isinstance(field_value, typing.List):
# We exclude propagating a parameter that matches the current field name because that would result in an infinite cycle
excluded_parameter = current_parameters.pop(field_name, None)
for i, element in enumerate(field_value):
if isinstance(element, dict):
parent_type_field_identifier = f"{propagated_component.get('type')}.{field_name}"
field_value[i] = self.propagate_types_and_parameters(parent_type_field_identifier, element, current_parameters)
if excluded_parameter:
current_parameters[field_name] = excluded_parameter
return propagated_component
@staticmethod
def _is_json_schema_object(propagated_component: Mapping[str, Any]) -> bool:
component_type = propagated_component.get("type")
if isinstance(component_type, list):
# Handle nullable types, ie ["null", "object"]
return "object" in component_type
return component_type == "object"
| ManifestComponentTransformer |
python | getsentry__sentry | src/sentry/notifications/platform/templates/sample.py | {
"start": 9682,
"end": 12102
} | class ____(NotificationTemplate[PerformanceAlertData]):
category = NotificationCategory.DEBUG
example_data = PerformanceAlertData(
metric_name="API response time",
threshold="500ms",
current_value="1.2s",
project_name="my-app",
chart_url="https://example.com/chart",
investigation_url="https://example.com/investigate",
)
def render(self, data: PerformanceAlertData) -> NotificationRenderedTemplate:
return NotificationRenderedTemplate(
subject=f"Performance Alert: {data.metric_name} threshold exceeded",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=f"Performance alert triggered for {data.metric_name} in project {data.project_name}. ",
)
],
),
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=f"The current value of {data.current_value} exceeds the threshold of {data.threshold}. ",
)
],
),
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="Immediate investigation is recommended to identify and resolve the performance degradation.",
)
],
),
],
actions=[
NotificationRenderedAction(
label="Investigate Performance", link="https://example.com/investigate"
)
],
chart=NotificationRenderedImage(
url="https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/buff-bufo.png",
alt_text="Performance metrics chart",
),
)
@dataclass(frozen=True)
| PerformanceAlertNotificationTemplate |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 1559,
"end": 1666
} | class ____(Proto_CoGeneric):
def m[T: Impl_CoGenericExplicit3](self: T) -> T: ...
| Impl_CoGenericExplicit3 |
python | dask__dask | dask/array/tests/test_dispatch.py | {
"start": 2767,
"end": 7200
} | class ____(np.lib.mixins.NDArrayOperatorsMixin):
"""
Another mock duck array class (like EncapsulateNDArray), but
designed to be above Dask in the type casting hierarchy (that is,
WrappedArray wraps Dask Array) and be even more minimal in API.
Tests that Dask defers properly to upcast types.
"""
def __init__(self, arr, **attrs):
self.arr = arr
self.attrs = attrs
def __array__(self, *args, **kwargs):
return np.asarray(self.arr, *args, **kwargs)
def _downcast_args(self, args):
for arg in args:
if isinstance(arg, type(self)):
yield arg.arr
elif isinstance(arg, (tuple, list)):
yield type(arg)(self._downcast_args(arg))
else:
yield arg
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
inputs = tuple(self._downcast_args(inputs))
return type(self)(getattr(ufunc, method)(*inputs, **kwargs), **self.attrs)
def __array_function__(self, func, types, args, kwargs):
args = tuple(self._downcast_args(args))
return type(self)(func(*args, **kwargs), **self.attrs)
def __dask_graph__(self):
# Note: make sure that dask dusk arrays do not interfere with the
# dispatch mechanism. The return value here, doesn't matter.
return ...
shape = dispatch_property("shape")
ndim = dispatch_property("ndim")
dtype = dispatch_property("dtype")
def __getitem__(self, key):
return type(self)(self.arr[key], **self.attrs)
def __setitem__(self, key, value):
self.arr[key] = value
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.eq,
operator.gt,
operator.ge,
operator.lt,
operator.le,
operator.mod,
operator.mul,
operator.ne,
operator.pow,
operator.sub,
operator.truediv,
operator.floordiv,
np.add,
np.subtract,
],
)
@pytest.mark.parametrize(
"arr_upcast, arr_downcast",
[
(
WrappedArray(np.random.default_rng().random((10, 10))),
da.random.default_rng().random((10, 10), chunks=(5, 5)),
),
(
da.random.default_rng().random((10, 10), chunks=(5, 5)),
EncapsulateNDArray(np.random.default_rng().random((10, 10))),
),
(
WrappedArray(np.random.default_rng().random((10, 10))),
EncapsulateNDArray(np.random.default_rng().random((10, 10))),
),
],
)
def test_binary_operation_type_precedence(op, arr_upcast, arr_downcast):
"""Test proper dispatch on binary operators and NumPy ufuncs"""
assert (
type(op(arr_upcast, arr_downcast))
== type(op(arr_downcast, arr_upcast))
== type(arr_upcast)
)
@pytest.mark.parametrize(
"arr, result",
[
(WrappedArray(np.arange(4)), False),
(da.from_array(np.arange(4)), False),
(EncapsulateNDArray(np.arange(4)), True),
(np.ma.masked_array(np.arange(4), [True, False, True, False]), True),
(np.arange(4), True),
(None, True),
# float/int/str scalars are not valid array chunks,
# but ops on float/int/str etc scalars do get handled
# by Dask
(0.0, False),
(0, False),
("", False),
],
)
def test_is_valid_array_chunk(arr, result):
"""Test is_valid_array_chunk for correctness"""
assert is_valid_array_chunk(arr) is result
@pytest.mark.parametrize(
"arr_type, result",
[
(WrappedArray, False),
(da.Array, False),
(EncapsulateNDArray, True),
(np.ma.MaskedArray, True),
(np.ndarray, True),
(float, False),
(int, False),
],
)
def test_is_valid_chunk_type(arr_type, result):
"""Test is_valid_chunk_type for correctness"""
assert is_valid_chunk_type(arr_type) is result
def test_direct_deferral_wrapping_override():
"""Directly test Dask deferring to an upcast type and the ability to still wrap it."""
a = da.from_array(np.arange(4))
b = WrappedArray(np.arange(4))
assert a.__add__(b) is NotImplemented
# Note: remove dask_graph to be able to wrap b in a dask array
b.__dask_graph__ = None
res = a + da.from_array(b)
assert isinstance(res, da.Array)
assert_eq(res, 2 * np.arange(4), check_type=False)
| WrappedArray |
python | patrys__httmock | tests.py | {
"start": 11844,
"end": 13969
} | class ____(unittest.TestCase):
@staticmethod
def several_calls(count, method, *args, **kwargs):
results = []
for _ in range(count):
results.append(method(*args, **kwargs))
return results
def test_several_calls(self):
with HTTMock(google_mock_count, facebook_mock_count):
results = self.several_calls(
3, requests.get, 'http://facebook.com/')
self.assertTrue(facebook_mock_count.call['called'])
self.assertEqual(facebook_mock_count.call['count'], 3)
self.assertFalse(google_mock_count.call['called'])
self.assertEqual(google_mock_count.call['count'], 0)
for r in results:
self.assertEqual(r.content, b'Hello from Facebook')
# Negative case: cleanup call data
with HTTMock(facebook_mock_count):
results = self.several_calls(
1, requests.get, 'http://facebook.com/')
self.assertEqual(facebook_mock_count.call['count'], 1)
@with_httmock(google_mock_count, facebook_mock_count)
def test_several_call_decorated(self):
results = self.several_calls(3, requests.get, 'http://facebook.com/')
self.assertTrue(facebook_mock_count.call['called'])
self.assertEqual(facebook_mock_count.call['count'], 3)
self.assertFalse(google_mock_count.call['called'])
self.assertEqual(google_mock_count.call['count'], 0)
for r in results:
self.assertEqual(r.content, b'Hello from Facebook')
self.several_calls(1, requests.get, 'http://facebook.com/')
self.assertEqual(facebook_mock_count.call['count'], 4)
def test_store_several_requests(self):
with HTTMock(google_mock_store_requests):
payload = {"query": "foo"}
requests.post('http://google.com', data=payload)
self.assertTrue(google_mock_store_requests.call['called'])
self.assertEqual(google_mock_store_requests.call['count'], 1)
request = google_mock_store_requests.call['requests'][0]
self.assertEqual(request.body, 'query=foo')
| RememberCalledTest |
python | getsentry__sentry | src/sentry_plugins/heroku/plugin.py | {
"start": 916,
"end": 5175
} | class ____(ReleaseHook):
def get_auth(self) -> AuthenticatedToken | None:
try:
return AuthenticatedToken.from_token(
ApiKey(organization_id=self.project.organization_id, scope_list=["project:write"])
)
except ApiKey.DoesNotExist:
return None
def is_valid_signature(self, body, heroku_hmac):
secret = ProjectOption.objects.get_value(project=self.project, key="heroku:webhook_secret")
if secret is None:
return False
computed_hmac = base64.b64encode(
hmac.new(
key=secret.encode("utf-8"),
msg=body.encode("utf-8"),
digestmod=sha256,
).digest()
).decode("utf-8")
return hmac.compare_digest(heroku_hmac, computed_hmac)
def handle(self, request: HttpRequest) -> HttpResponse | None:
heroku_hmac = request.headers.get("Heroku-Webhook-Hmac-SHA256")
if not self.is_valid_signature(request.body.decode("utf-8"), heroku_hmac):
logger.info("heroku.webhook.invalid-signature", extra={"project_id": self.project.id})
return HttpResponse(status=401)
body = json.loads(request.body)
data = body.get("data")
email = data.get("user", {}).get("email") or data.get("actor", {}).get("email")
users = user_service.get_many_by_email(
emails=[email],
organization_id=self.project.organization_id,
is_verified=False,
)
user = users[0] if users else None
if user is None:
logger.info(
"owner.missing",
extra={
"organization_id": self.project.organization_id,
"project_id": self.project.id,
"email": email,
},
)
slug = data.get("slug")
if not slug:
logger.info("heroku.payload.missing-commit", extra={"project_id": self.project.id})
return HttpResponse(status=401)
commit = slug.get("commit")
app_name = data.get("app", {}).get("name")
if body.get("action") == "update":
if app_name:
self.finish_release(
version=commit,
url=f"http://{app_name}.herokuapp.com",
owner_id=user.id if user else None,
)
else:
self.finish_release(version=commit, owner_id=user.id if user else None)
return None
def set_refs(self, release, **values):
if not values.get("owner_id", None):
return
# check if user exists, and then try to get refs based on version
repo_project_option = ProjectOption.objects.get_value(
project=self.project, key="heroku:repository"
)
deploy_project_option = (
ProjectOption.objects.get_value(
project=self.project, key="heroku:environment", default="production"
)
or "production"
)
if repo_project_option:
try:
repository = Repository.objects.get(
organization_id=self.project.organization_id, name=repo_project_option
)
except Repository.DoesNotExist:
logger.info(
"repository.missing",
extra={
"organization_id": self.project.organization_id,
"project_id": self.project.id,
"repository": repo_project_option,
},
)
else:
release.set_refs(
refs=[{"commit": release.version, "repository": repository.name}],
user_id=values["owner_id"],
fetch=True,
)
# create deploy associated with release via ReleaseDeploysEndpoint
serializer = DeploySerializer(
data={"environment": deploy_project_option},
context={"organization": self.project.organization},
)
assert serializer.is_valid()
create_deploy(self.project.organization, release, serializer)
| HerokuReleaseHook |
python | getsentry__sentry | src/sentry/migrations/1001_prevent_grouphistory_infinte_recursion.py | {
"start": 222,
"end": 1662
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "1000_add_project_distribution_scope"),
]
operations = [
migrations.AlterField(
model_name="grouphistory",
name="prev_history",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="sentry.grouphistory"
),
),
]
| Migration |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1211680,
"end": 1211899
} | class ____(VegaLiteSchema):
"""SingleDefUnitChannel schema wrapper."""
_schema = {"$ref": "#/definitions/SingleDefUnitChannel"}
def __init__(self, *args):
super().__init__(*args)
| SingleDefUnitChannel |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 47787,
"end": 59548
} | class ____(Request):
"""
Edit an existing model
:param model: Model ID
:type model: str
:param uri: URI for the model
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Case insensitive.
Should be identical to the framework of the task which created the model.
:type framework: str
:param design: Json[d] object representing the model design. Should be
identical to the network design of the task which created the model
:type design: dict
:param labels: Json object
:type labels: dict
:param ready: Indication if the model is final and can be used by other tasks
:type ready: bool
:param project: Project to which to model belongs
:type project: str
:param parent: Parent model
:type parent: str
:param task: Associated task ID
:type task: str
:param iteration: Iteration (used to update task statistics)
:type iteration: int
:param metadata: Model metadata
:type metadata: list
"""
_service = "models"
_action = "edit"
_version = "2.13"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"design": {
"additionalProperties": True,
"description": "Json[d] object representing the model design. Should be identical to the network design of the task which created the model",
"type": "object",
},
"framework": {
"description": "Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.",
"type": "string",
},
"iteration": {
"description": "Iteration (used to update task statistics)",
"type": "integer",
},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object",
"type": "object",
},
"model": {"description": "Model ID", "type": "string"},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"parent": {"description": "Parent model", "type": "string"},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"uri": {"description": "URI for the model", "type": "string"},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self,
model: str,
uri: Optional[str] = None,
name: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
ready: Optional[bool] = None,
project: Optional[str] = None,
parent: Optional[str] = None,
task: Optional[str] = None,
iteration: Optional[int] = None,
metadata: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(EditRequest, self).__init__(**kwargs)
self.model = model
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.ready = ready
self.project = project
self.parent = parent
self.task = task
self.iteration = iteration
self.metadata = metadata
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("uri")
def uri(self) -> Optional[str]:
return self._property_uri
@uri.setter
def uri(self, value: Optional[str]) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iteration")
def iteration(self) -> Optional[int]:
return self._property_iteration
@iteration.setter
def iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iteration", six.integer_types)
self._property_iteration = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetadataItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metadata", MetadataItem, is_array=True)
self._property_metadata = value
| EditRequest |
python | google__jax | jax/_src/shard_map.py | {
"start": 57865,
"end": 88721
} | class ____(core.Tracer):
vma: frozenset[AxisName]
val: JaxType
def __init__(self, trace, vma, val):
self._trace = trace
if isinstance(vma, set):
vma = frozenset(vma)
assert isinstance(vma, frozenset)
self.vma = vma
self.val = val
@property
def aval(self):
aval = core.get_aval(self.val)
vma = self.vma if self._trace.check else self._trace.manual_axes
size = prod(self._trace.mesh.shape[n] for n in vma)
out = core.mapped_aval(size, 0, aval)
new_sharding = NamedSharding(
_as_manual_mesh(self._trace.amesh, self._trace.manual_axes),
out.sharding.spec) # pytype: disable=attribute-error
vma = self.vma if config._check_vma.value else frozenset()
return out.update(sharding=new_sharding, vma=vma)
def to_concrete_value(self):
if self._trace.check and self.vma == frozenset():
with core.eval_context(), use_abstract_mesh(self._trace.amesh):
return core.to_concrete_value(self.val[0])
else:
return None
def __str__(self) -> str:
pb_names = set(self._trace.mesh.axis_names) - self.vma
self = pvary(self, tuple(pb_names))
with core.eval_context(), use_abstract_mesh(self._trace.amesh):
blocks = list(self.val)
mesh = self._trace.mesh
axis_names = f"({', '.join(map(str, mesh.axis_names))},)"
return '\n'.join(
f"On {device} at mesh coordinates {axis_names} = {idx}:\n{block}\n"
for (idx, device), block in zip(np.ndenumerate(mesh.devices), blocks))
__repr__ = __str__ # for debuggers, like `p x`
def _prim_applier(prim, check_vma, params_tup, concrete_mesh, manual_axes,
in_specs, out_specs, *args):
def apply(*args):
outs = prim.bind(*map(_rem_singleton, args), **dict(params_tup))
return tree_map(_add_singleton, outs)
out_specs = list(out_specs) if type(out_specs) is tuple else out_specs
return shard_map(apply, mesh=concrete_mesh, in_specs=in_specs,
out_specs=out_specs, check_vma=check_vma,
axis_names=manual_axes)(*args)
eager_rules: dict[core.Primitive, Callable] = {}
def _device_put_eager_rule(mesh, *xs, srcs, devices, copy_semantics):
del mesh, srcs, copy_semantics
for device in devices:
if device is not None:
raise ValueError("device_put with explicit device not allowed within "
f"shard_map-decorated functions, but got device {device}")
return xs
eager_rules[dispatch.device_put_p] = _device_put_eager_rule
# Batching
def _shard_map_batch(
trace: batching.BatchTrace, prim: core.Primitive, fun: lu.WrappedFun,
in_tracers: Sequence[batching.BatchTracer], mesh: Mesh,
in_specs, out_specs_thunk, check_vma: bool, manual_axes: frozenset
) -> Sequence[batching.BatchTracer]:
in_vals, in_dims = unzip2(map(trace.to_batch_info, in_tracers))
if any(isinstance(d, batching.RaggedAxis) for d in in_dims):
raise NotImplementedError
spmd_axis_name = trace.axis_data.spmd_name
explicit_mesh_axis = trace.axis_data.explicit_mesh_axis
if spmd_axis_name is not None:
used = {n for spec in in_specs for n in _spec_to_vma(spec)}
if not config.disable_vmap_shmap_error.value and set(spmd_axis_name) & used:
raise ValueError("vmap spmd_axis_name cannot appear in shard_map in_specs")
new_in_specs = [
sp if d is batching.not_mapped else pxla.batch_spec(sp, d, spmd_axis_name)
for sp, d in zip(in_specs, in_dims)]
new_size = trace.axis_data.size // prod(mesh.shape[n] for n in spmd_axis_name)
new_axis_data = batching.AxisData(
trace.axis_data.name, new_size, trace.axis_data.spmd_name,
trace.axis_data.explicit_mesh_axis)
elif explicit_mesh_axis is not None:
used = {n for spec in in_specs for n in _spec_to_vma(spec)}
if set(explicit_mesh_axis) & used:
raise ValueError("vmapped away explicit mesh axis cannot appear in "
"shard_map in_specs")
new_in_specs = [
sp if d is batching.not_mapped else pxla.batch_spec(sp, d, None)
for sp, d in zip(in_specs, in_dims)]
new_axis_data = trace.axis_data
else:
new_in_specs = [sp if d is batching.not_mapped else pxla.batch_spec(sp, d, None)
for sp, d in zip(in_specs, in_dims)]
new_axis_data = trace.axis_data
fun, out_dims = batching.batch_subtrace(
fun, trace.tag, new_axis_data, tuple(in_dims))
@as_hashable_function(closure=out_specs_thunk)
def new_out_specs_thunk():
return _batch_out_specs(spmd_axis_name, explicit_mesh_axis, out_dims(),
out_specs_thunk())
new_params = dict(mesh=mesh, in_specs=new_in_specs,
out_specs_thunk=new_out_specs_thunk, check_vma=check_vma,
manual_axes=manual_axes)
with core.set_current_trace(trace.parent_trace):
out_vals = prim.bind(fun, *in_vals, **new_params)
make_tracer = partial(batching.BatchTracer, trace,
source_info=source_info_util.current())
return map(make_tracer, out_vals, out_dims())
batching.BatchTrace.process_shard_map = _shard_map_batch
def _batch_out_specs(spmd_name, explicit_mesh_axis, dims, out_specs):
if spmd_name is not None:
used = {n for spec in out_specs for n in _spec_to_vma(spec)}
if not config.disable_vmap_shmap_error.value and set(spmd_name) & used:
raise ValueError("vmap spmd_axis_name cannot appear in shard_map out_specs")
return [sp if d is batching.not_mapped else pxla.batch_spec(sp, d, spmd_name)
for sp, d in zip(out_specs, dims)]
elif explicit_mesh_axis is not None:
used = {n for spec in out_specs for n in _spec_to_vma(spec)}
if set(explicit_mesh_axis) & used:
raise ValueError("vmapped away explicit mesh axis cannot appear in "
"shard_map out_specs")
return [sp if d is batching.not_mapped else pxla.batch_spec(sp, d, None)
for sp, d in zip(out_specs, dims)]
else:
return [sp if d is batching.not_mapped else pxla.batch_spec(sp, d, None)
for sp, d in zip(out_specs, dims)]
# Autodiff
def _shard_map_jvp(trace, shard_map_p, f: lu.WrappedFun, tracers, mesh, in_specs,
out_specs_thunk, check_vma, manual_axes):
f = f.with_unknown_names()
primals, tangents = unzip2(map(trace.to_primal_tangent_pair, tracers))
which_nz = [ type(t) is not ad.Zero for t in tangents]
tangents = [t if type(t) is not ad.Zero else None for t in tangents]
args, in_tree = tree_flatten((primals, tangents))
f_jvp = ad.jvp_subtrace(f, trace.tag)
f_jvp, which_nz_out = ad.nonzero_tangent_outputs(f_jvp)
tangent_in_specs = [sp for sp, nz in zip(in_specs, which_nz) if nz]
@as_hashable_function(closure=out_specs_thunk)
def new_out_specs_thunk():
out_ax = out_specs_thunk()
return (*out_ax, *(ax for ax, nz in zip(out_ax, which_nz_out()) if nz))
params = dict(mesh=mesh, in_specs=(*in_specs, *tangent_in_specs),
out_specs_thunk=new_out_specs_thunk, check_vma=check_vma,
manual_axes=manual_axes)
f_jvp, out_tree = ad.traceable(f_jvp, in_tree)
result = shard_map_p.bind_with_trace(trace.parent_trace, (f_jvp,) + tuple(args), params)
primal_out, tangent_out = tree_unflatten(out_tree(), result)
tangent_out = [ad.Zero(core.get_aval(p).to_tangent_aval()) if t is None else t
for p, t in zip(primal_out, tangent_out)]
return [ad.JVPTracer(trace, p, t) for p, t in zip(primal_out, tangent_out)]
ad.JVPTrace.process_shard_map = _shard_map_jvp
def _shard_map_partial_eval(trace: pe.JaxprTrace, shard_map_p,
f: lu.WrappedFun, tracers, mesh, in_specs,
out_specs_thunk, check_vma, manual_axes):
tracers = map(trace.to_jaxpr_tracer, tracers)
in_pvals = [t.pval for t in tracers]
in_knowns, in_avals, in_consts = pe.partition_pvals(in_pvals)
unk_in_specs, known_in_specs = pe.partition_list(in_knowns, in_specs)
in_avals_sharded = map(partial(shard_aval, mesh, manual_axes, check_vma),
unk_in_specs, in_avals)
f = pe.trace_to_subjaxpr_nounits_fwd2(f, trace.tag, f.debug_info, False)
f = _promote_scalar_residuals(f)
f_known, aux = pe.partial_eval_wrapper_nounits2(
f, (*in_knowns,), (*in_avals_sharded,))
all_names = _all_newly_manual_mesh_names(mesh, manual_axes)
@as_hashable_function(closure=out_specs_thunk)
def known_out_specs():
_, _, out_knowns, res_avals, _, _ = aux()
_, out_known_specs = pe.partition_list(out_knowns, out_specs_thunk())
if check_vma:
res_specs = [P(order_wrt_mesh(mesh, a.vma)) for a in res_avals]
else:
res_specs = [P(all_names)] * len(res_avals)
return (*out_known_specs, *res_specs)
known_params = dict(mesh=mesh, in_specs=(*known_in_specs,),
out_specs_thunk=known_out_specs, check_vma=check_vma,
manual_axes=manual_axes)
out = shard_map_p.bind_with_trace(trace.parent_trace,
(f_known.with_unknown_names(), *in_consts),
known_params)
in_fwd, out_fwd, out_knowns, res_avals, jaxpr, env = aux()
num_res = sum(f1 is None and f2 is None for f1, f2 in zip(in_fwd, out_fwd))
out_consts, non_fwd_res = split_list(out, [len(out) - num_res])
assert not jaxpr.constvars
unk_out_specs, _ = pe.partition_list(out_knowns, out_specs_thunk())
known_out_specs_ = known_out_specs()
res = subs_list2(in_fwd, out_fwd, in_consts, out_consts, non_fwd_res)
# TODO make res_avals be the full set, not just the non-fwd ones
res_avals_iter = iter(res_avals)
res_specs = []
for f1, f2 in zip(in_fwd, out_fwd):
if f1 is not None:
res_specs.append(known_in_specs[f1])
elif f2 is not None:
res_specs.append(known_out_specs_[f2])
else:
if check_vma:
res_vma = next(res_avals_iter).vma
res_specs.append(P(order_wrt_mesh(mesh, res_vma)))
else:
res_specs.append(P(all_names))
unk_in_specs = (*res_specs,) + (P(),) * len(env) + (*unk_in_specs,) # type: ignore[assignment]
const_tracers = map(trace.new_instantiated_const, res)
env_tracers = map(trace.to_jaxpr_tracer, env)
unk_arg_tracers = [t for t in tracers if not t.is_known()]
out_avals_sharded = [v.aval for v in jaxpr.outvars]
unk_params = dict(mesh=mesh, in_specs=unk_in_specs,
out_specs=tuple(unk_out_specs),
jaxpr=jaxpr.replace(debug_info=jaxpr.debug_info.with_unknown_names()),
check_vma=check_vma, manual_axes=manual_axes)
out_avals = map(partial(unshard_aval, mesh, check_vma), unk_out_specs,
out_avals_sharded)
out_tracers = [pe.JaxprTracer(trace, pe.PartialVal.unknown(a), None)
for a in out_avals]
effs = core.filter_named_axis_effects(jaxpr.effects, mesh.axis_names)
eqn = pe.new_eqn_recipe(trace, (*const_tracers, *env_tracers, *unk_arg_tracers),
out_tracers, shard_map_p, unk_params,
effs, source_info_util.current())
for t in out_tracers: t.recipe = eqn
return merge_lists(out_knowns, out_tracers, out_consts)
pe.JaxprTrace.process_shard_map = _shard_map_partial_eval
def _shard_map_linearize(trace, shard_map_p, f: lu.WrappedFun,
tracers, mesh, in_specs, out_specs_thunk, check_vma,
manual_axes):
primals, tangents = unzip2(map(trace.to_primal_tangent_pair, tracers))
nzs_in = tuple(type(t) is not ad.Zero for t in tangents)
f = f.with_unknown_names()
f_primal, linearize_outs_thunk = ad.linearize_subtrace(
f, trace.tag, nzs_in, f.debug_info)
f_primal = _promote_scalar_residuals_lin(f_primal, linearize_outs_thunk)
all_names = _all_newly_manual_mesh_names(mesh, manual_axes)
@as_hashable_function(closure=linearize_outs_thunk)
def fwd_out_specs_thunk():
res_avals, _, _, _, in_fwd, out_fwd = linearize_outs_thunk()
res_avals = [r for r, f1, f2 in zip(res_avals, in_fwd, out_fwd)
if f1 is None and f2 is None]
out_specs = out_specs_thunk()
if check_vma:
res_specs = [P(order_wrt_mesh(mesh, a.vma)) for a in res_avals]
else:
res_specs = [P(all_names)] * len(res_avals)
return (*res_specs, *out_specs)
fwd_params = dict(
mesh=mesh, in_specs=in_specs,
out_specs_thunk=fwd_out_specs_thunk, check_vma=check_vma,
manual_axes=manual_axes)
all_fwd_results = shard_map_p.bind_with_trace(
trace.parent_trace, (f_primal, *primals), fwd_params)
res_avals, nzs_out, lin_jaxpr, env, in_fwd, out_fwd = linearize_outs_thunk()
num_res_out = sum(f1 is None and f2 is None for f1, f2 in zip(in_fwd, out_fwd))
non_fwd_res = all_fwd_results[:num_res_out]
primals_out = all_fwd_results[num_res_out:]
residuals = subs_list2(in_fwd, out_fwd, primals, primals_out, non_fwd_res)
args_to_promote = [getattr(aval, 'shape', ()) == () and f1 is None and f2 is None
for aval, f1, f2 in zip(res_avals, in_fwd, out_fwd)]
with (_extend_axis_env(mesh, manual_axes),
use_abstract_mesh(_as_manual_mesh(mesh, manual_axes)),
config._check_vma(check_vma)):
lin_jaxpr = _promote_scalar_residuals_jaxpr(lin_jaxpr, args_to_promote)
out_specs = out_specs_thunk()
res_avals2 = [r for r, f1, f2 in zip(res_avals, in_fwd, out_fwd)
if f1 is None and f2 is None]
res_avals_iter = iter(res_avals2)
res_specs = []
for f1, f2 in zip(in_fwd, out_fwd):
if f1 is not None:
res_specs.append(in_specs[f1])
elif f2 is not None:
res_specs.append(out_specs[f2])
else:
if check_vma:
res_vma = next(res_avals_iter).vma
res_specs.append(P(order_wrt_mesh(mesh, res_vma)))
else:
res_specs.append(P(all_names))
new_in_specs = (*res_specs, *(P(),) * len(env),
*(ax for ax, nz in zip(in_specs, nzs_in) if nz))
tangent_out_specs = tuple(ax for ax, nz in zip(out_specs_thunk(), nzs_out)
if nz)
@as_hashable_function(closure=tangent_out_specs)
def tangent_out_specs_thunk():
return tangent_out_specs
tangent_params = dict(
mesh=mesh, in_specs=new_in_specs, out_specs_thunk=tangent_out_specs_thunk,
check_vma=check_vma, manual_axes=manual_axes)
# TODO(mattjj): avoid round-tripping the jaxpr through eval_jaxpr here
def f_tangent(*args):
return core.eval_jaxpr(lin_jaxpr, (), *args)
nz_tangents_in = [t for (t, nz) in zip(tangents, nzs_in) if nz]
nz_tangents_out = shard_map_p.bind_with_trace(
trace.tangent_trace,
(lu.wrap_init(f_tangent, debug_info=lin_jaxpr.debug_info),
*residuals, *env, *nz_tangents_in), tangent_params)
nz_tangents_out_iter = iter(nz_tangents_out)
tangents_out = [next(nz_tangents_out_iter) if nz else ad.Zero.from_primal_value(primal)
for nz, primal in zip(nzs_out, primals_out)]
return map(partial(ad.maybe_linearize_tracer, trace), primals_out, nzs_out, tangents_out)
# Register the linearization rule defined above so LinearizeTrace dispatches
# shard_map through it instead of the generic call-primitive path.
ad.LinearizeTrace.process_shard_map = _shard_map_linearize
@lu.transformation2
def _promote_scalar_residuals_lin(f, linearize_outs_thunk, *args, **kwargs):
  """Wrap `f` so that scalar residual outputs get a singleton leading axis.

  The first `num_res` outputs of `f` are residuals (those not forwarded from
  inputs or primal outputs); scalars among them are broadcast to shape (1,)
  so they can be passed through shard_map uniformly.
  """
  outs = f(*args, **kwargs)
  _, _, _, _, in_fwd, out_fwd = linearize_outs_thunk()
  num_res = sum(fwd_i is None and fwd_o is None
                for fwd_i, fwd_o in zip(in_fwd, out_fwd))
  res, primals = outs[:num_res], outs[num_res:]
  res = [x if getattr(x, 'shape', ()) else lax.broadcast(x, (1,)) for x in res]
  return *res, *primals
@lu.transformation2
def _promote_scalar_residuals(f: Callable, *args, **kwargs):
  """Wrap `f` so scalar residuals (jaxpr constvars) gain a singleton axis.

  A residual is promoted only when it is not forwarded from an input or an
  output and its aval is zero-rank; the jaxpr and the corresponding output
  constants are adjusted consistently.
  """
  jaxpr, (in_fwds, out_fwds, out_pvals, out_consts, env) = f(*args, **kwargs)
  promote = [fwd_i is None and fwd_o is None and not var.aval.shape
             for fwd_i, fwd_o, var in zip(in_fwds, out_fwds, jaxpr.constvars)]
  jaxpr = _promote_scalar_residuals_jaxpr(jaxpr, promote)
  out_consts = [x if getattr(x, 'shape', ()) else lax.broadcast(x, (1,))
                for x in out_consts]
  return jaxpr, (in_fwds, out_fwds, out_pvals, out_consts, env)
def _promote_scalar_residuals_jaxpr(jaxpr: core.Jaxpr, which: Sequence[bool]):
  """Retrace `jaxpr` so constvars flagged in `which` become (1,)-shaped inputs.

  The promoted residuals are squeezed back to scalars (via `_rem_singleton`)
  before evaluating the original jaxpr, so semantics are unchanged.
  """
  def wrapped(*res_and_args):
    res, args = split_list(res_and_args, [len(jaxpr.constvars)])
    res = [_rem_singleton(r) if promote else r
           for r, promote in zip(res, which)]
    return core.eval_jaxpr(jaxpr, res, *args)
  res_avals = [core.unmapped_aval(1, 0, v.aval) if promote else v.aval
               for v, promote in zip(jaxpr.constvars, which)]
  in_avals = [*res_avals, *(v.aval for v in jaxpr.invars)]
  new_jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(
      lu.wrap_init(wrapped, debug_info=jaxpr.debug_info), in_avals)
  return new_jaxpr
def _unmentioned2(mesh: Mesh, spec, manual_axes: frozenset[AxisName]
                  ) -> list[AxisName]:
  """Manual mesh axis names that `spec` does not mention.

  This is a filtered-down version of "unmentioned" used to avoid a defensive
  psum over more chips than required in the transpose-no-check-vma case.
  """
  mentioned = _spec_to_vma(spec)
  candidates = _all_mesh_names_except_spmd(mesh, manual_axes)
  return [axis for axis in candidates if axis not in mentioned]
def _shard_map_transpose(out_cts, *args,
                         jaxpr: core.Jaxpr, mesh, in_specs, out_specs,
                         check_vma, manual_axes):
  """Transpose rule for `shard_map_p`.

  Runs the backward pass of `jaxpr` inside a fresh shard_map whose inputs are
  the (nonzero) output cotangents plus the defined primal inputs, and whose
  outputs are the cotangents of the undefined primal inputs.
  """
  # Divide only by nontrivial replication factors.
  mb_div = lambda x, y: x / y if y != 1 else x
  # Shard symbolic-zero cotangent avals. With vma checking off, pre-divide
  # real cotangents by the product of mesh-axis sizes they are replicated
  # over; this pairs with the defensive psum inside `fun_trans_callable`.
  out_cts = [
      ad.Zero(shard_aval(mesh, manual_axes, check_vma, sp, x.aval))
      if type(x) is ad.Zero else x if check_vma or dtypes.dtype(x) == dtypes.float0
      else mb_div(x, prod(map(mesh.shape.get, _unmentioned2(mesh, sp, manual_axes))))
      for sp, x in zip(out_specs, out_cts)
  ]
  # Re-shard the avals carried by undefined primals (inputs needing cotangents).
  args = [x if type(x) is not ad.UndefinedPrimal else
          ad.UndefinedPrimal(shard_aval(mesh, manual_axes, check_vma, sp, x.aval))
          for sp, x in zip(in_specs, args)]
  all_args, in_tree = tree_flatten((out_cts, tuple(args)))
  def fun_trans_callable(out_cts, args):
    # TODO(mattjj): when #26811 lands, delete this and just run backward_pass
    # Partially evaluate so the known (defined-primal) part runs forward to
    # produce residuals, and only the unknown part is transposed.
    in_undef = map(ad.is_undefined_primal, args)
    res, undefs = partition_list(in_undef, args)
    jaxpr_known, jaxpr_unknown, _, _ = pe.partial_eval_jaxpr_nounits(
        pe.close_jaxpr(jaxpr), in_undef, False)
    res_reshaped = core.jaxpr_as_fun(jaxpr_known)(*res)
    in_cts = ad.backward_pass(
        jaxpr_unknown.jaxpr, False, (), (*res_reshaped, *undefs), out_cts
    )[len(res_reshaped):]
    _, in_ct_specs = partition_list(in_undef, in_specs)
    # With vma checking off, defensively psum each cotangent over the manual
    # axes its spec does not mention (matches the `mb_div` pre-division above).
    in_cts = [ad.Zero(unshard_aval(mesh, check_vma, sp, x.aval))
              if type(x) is ad.Zero else x if check_vma
              else lax_parallel.psum(x, tuple(_unmentioned2(mesh, sp, manual_axes)))
              for sp, x in zip(in_ct_specs, in_cts)]
    # Defined primals get symbolic-zero cotangents.
    res_zeros = [ad_util.zero_from_primal(r) for r in res]
    return merge_lists(in_undef, res_zeros, in_cts)
  fun_trans_callable.__name__ = f"transpose({jaxpr.debug_info.func_name})"
  fun_trans = lu.wrap_init(fun_trans_callable, debug_info=jaxpr.debug_info)
  fun_trans, nz_arg_cts = ad.nonzero_outputs(fun_trans)
  fun_trans_flat, out_tree = api_util.flatten_fun_nokwargs(fun_trans, in_tree)
  # Input specs: cotangent specs for nonzero output cotangents, then specs of
  # the defined primal inputs that are passed through as residual data.
  new_in_specs = (
      [core.primal_spec_to_cotangent_spec(s)
       for s, x in zip(out_specs, out_cts) if type(x) is not ad.Zero] +
      [s for s, x in zip(in_specs, args) if type(x) is not ad.UndefinedPrimal])
  def new_out_specs_thunk():
    # Output specs are known only after tracing reveals which cotangents are
    # nonzero, hence a thunk.
    return tuple(core.primal_spec_to_cotangent_spec(sp)
                 for sp, nz in zip(in_specs, nz_arg_cts()) if nz)
  try:
    out_flat = shard_map_p.bind(
        fun_trans_flat, *all_args, mesh=mesh, in_specs=tuple(new_in_specs),
        out_specs_thunk=new_out_specs_thunk, check_vma=check_vma,
        manual_axes=manual_axes)
  except (FloatingPointError, ZeroDivisionError) as e:
    print("Invalid nan value encountered in the backward pass of a shard_map "
          "function. Calling the de-optimized backward pass.")
    try:
      # TODO(mattjj): Remove this and do `fun_trans.call_wrapped(out_cts, args)`
      # in eager mode so that output of shmap are not manual.
      with api.disable_jit(True):
        _ = shard_map_p.bind(
            fun_trans_flat, *all_args, mesh=mesh, in_specs=tuple(new_in_specs),
            out_specs_thunk=new_out_specs_thunk, check_vma=check_vma,
            manual_axes=manual_axes)
    except (FloatingPointError, ZeroDivisionError) as e2:
      # The de-optimized run reproduced the error; surface it directly.
      raise e2 from None
    else:
      # De-optimized run succeeded, so the nan came from an optimization.
      api_util._raise_no_nan_in_deoptimized(e)
  except _RepError as e:
    fails, = e.args
    # NOTE(review): if `out_specs` is callable, this branch neither raises nor
    # assigns `out_flat`, so the return below would hit an unbound local.
    # Presumably `out_specs` is never callable by transpose time — confirm.
    if not callable(out_specs):
      msg = _inout_vma_error(
          fun_trans, mesh, out_tree(), list(new_out_specs_thunk()), fails)
      raise ValueError(msg) from None
  return tree_unflatten(out_tree(), out_flat)
# Register as the transpose rule for the shard_map primitive.
ad.primitive_transposes[shard_map_p] = _shard_map_transpose
# Remat
def _partial_eval_jaxpr_custom_rule(
    saveable: Callable[..., pe.RematCases_], unks_in: Sequence[bool],
    inst_in: Sequence[bool], eqn: core.JaxprEqn
) -> tuple[core.JaxprEqn, core.JaxprEqn, Sequence[bool], Sequence[bool],
           list[core.Var]]:
  """Partial-eval (remat) rule for shard_map_p.

  Splits `eqn` into a "known" shard_map eqn (run now) and a "staged" one
  (run later), threading residuals between them. Residuals that are merely
  forwarded inputs (`in_fwd`) or forwarded primal outputs (`out_fwd`) are not
  materialized as fresh outputs; only the rest (`which`) become new residual
  variables.
  """
  jaxpr, mesh = eqn.params['jaxpr'], eqn.params['mesh']
  check_vma, manual_axes = eqn.params['check_vma'], eqn.params['manual_axes']
  # Partial-eval the inner jaxpr under the manual-mesh axis environment.
  with (_extend_axis_env(mesh, manual_axes), config._check_vma(check_vma),
        use_abstract_mesh(_as_manual_mesh(mesh, manual_axes))):
    jaxpr_known, jaxpr_staged, unks_out, inst_out, num_res = \
        pe.partial_eval_jaxpr_custom(jaxpr, unks_in, inst_in, False, False, saveable)
  num_out_primals = len(jaxpr_known.outvars) - num_res
  # For each residual: index of the input it forwards, if any.
  in_fwd = pe._jaxpr_forwarding(jaxpr_known)[num_out_primals:]
  out_vars, res_vars = split_list(jaxpr_known.outvars, [num_out_primals])
  # For each residual: index of the primal output it duplicates, if any.
  idx_map = {id(v): i for i, v in enumerate(out_vars)}
  out_fwd = [idx_map.get(id(v)) for v in res_vars]
  # `which` marks residuals that must actually be produced and passed along.
  which = [f1 is None and f2 is None for f1, f2 in zip(in_fwd, out_fwd)]
  mesh = eqn.params['mesh']
  with (_extend_axis_env(mesh, manual_axes),
        use_abstract_mesh(_as_manual_mesh(mesh, manual_axes)),
        config._check_vma(check_vma)):
    # Drop forwarded residuals from the known jaxpr's outputs, and add
    # singleton axes to scalar residuals so they can be sharded.
    jaxpr_known = pe.prune_jaxpr_outputs(jaxpr_known, [True] * num_out_primals + which)
    jaxpr_known, jaxpr_staged = _add_reshapes(which, jaxpr_known, jaxpr_staged)
  jaxpr_known = core.remove_named_axis_effects(jaxpr_known, mesh.axis_names)
  jaxpr_staged = core.remove_named_axis_effects(jaxpr_staged, mesh.axis_names)
  ins_known, _ = partition_list(unks_in, eqn.invars)
  out_binders_known, _ = partition_list(unks_out, eqn.outvars)
  _, ins_staged = partition_list(inst_in, eqn.invars)
  _, out_binders_staged = partition_list(inst_out, eqn.outvars)
  newvar = core.gensym()
  # Create binders (and staged-input specs) for the non-forwarded residuals.
  residuals, staged_in_res_specs = [], []
  for var, w in zip(jaxpr_staged.invars[:num_res], which):
    if w:
      rn = (P(order_wrt_mesh(mesh, var.aval.vma))  # type: ignore
            if check_vma else P(_all_newly_manual_mesh_names(mesh, manual_axes)))
      residuals.append(newvar(unshard_aval(mesh, check_vma, rn, var.aval)))
      staged_in_res_specs.append(rn)
  if check_vma:
    out_res_specs_known = [P(order_wrt_mesh(mesh, var.aval.vma))  # type: ignore
                           for var, o in zip(res_vars, out_fwd) if o is None]
  else:
    out_res_specs_known = [
        P(_all_newly_manual_mesh_names(mesh, manual_axes))] * sum(which)
  params_known, params_staged = _pe_custom_params(
      unks_in, inst_in, map(op.not_, unks_out), inst_out, in_fwd, out_fwd,
      out_res_specs_known, staged_in_res_specs,
      dict(eqn.params, jaxpr=jaxpr_known), dict(eqn.params, jaxpr=jaxpr_staged))
  eqn_known = pe.new_jaxpr_eqn(ins_known, [*out_binders_known, *residuals],
                               eqn.primitive, params_known, jaxpr_known.effects,
                               eqn.source_info, eqn.ctx)
  # Reconstruct the staged eqn's full residual list by substituting forwarded
  # residuals with the corresponding known inputs / known outputs.
  full_res = subs_list2(in_fwd, out_fwd, ins_known, out_binders_known, residuals)
  eqn_staged = pe.new_jaxpr_eqn([*full_res, *ins_staged], out_binders_staged,
                                eqn.primitive, params_staged,
                                jaxpr_staged.effects, eqn.source_info, eqn.ctx)
  assert len(eqn_staged.invars) == len(jaxpr_staged.invars)
  # Variables that must be instantiated for the staged eqn to reference them.
  new_inst = [x for x, inst in zip(eqn.invars, inst_in)
              if type(x) is core.Var and not inst]
  new_inst += [out_binders_known[f] for f in {i for i in out_fwd if i is not None}]
  return eqn_known, eqn_staged, unks_out, inst_out, new_inst + residuals
pe.partial_eval_jaxpr_custom_rules[shard_map_p] = \
    _partial_eval_jaxpr_custom_rule
def _add_reshapes(which: Sequence[bool],
                  jaxpr_known: core.Jaxpr,
                  jaxpr_staged: core.Jaxpr) -> tuple[core.Jaxpr, core.Jaxpr]:
  """Give scalar residuals a singleton leading axis across the known/staged split.

  Retraces `jaxpr_known` to broadcast scalar residual outputs to shape (1,),
  and `jaxpr_staged` to squeeze them back on input, so the residuals passed
  between the two shard_maps are never zero-rank.
  """
  # add singleton axes to residuals which are from jaxpr_known and are scalars
  which_ = [w and not v.aval.shape  # pytype: disable=attribute-error
            for w, v in zip(which, jaxpr_staged.invars[:len(which)])]
  if not any(which_): return jaxpr_known, jaxpr_staged
  assert not jaxpr_known.constvars and not jaxpr_staged.constvars
  def known(*args):
    # Same outputs as jaxpr_known, but with scalar residuals broadcast to (1,).
    out = core.eval_jaxpr(jaxpr_known, (), *args)
    out_known, res = split_list(out, [len(out) - sum(which)])
    res = [_add_singleton(x) if not x.shape else x for x in res]
    return [*out_known, *res]
  avals_in = [v.aval for v in jaxpr_known.invars]
  jaxpr_known, _, () = pe.trace_to_jaxpr_dynamic(
      lu.wrap_init(known, debug_info=jaxpr_known.debug_info), avals_in)
  def staged(*args):
    # Squeeze the promoted residuals back to scalars before evaluation.
    res_, ins = split_list(args, [len(which)])
    res = [_rem_singleton(x) if w else x for x, w in zip(res_, which_)]
    return core.eval_jaxpr(jaxpr_staged, (), *res, *ins)
  res_avals = [core.unmapped_aval(1, 0, v.aval) if w else v.aval
               for w, v in zip(which_, jaxpr_staged.invars[:len(which)])]
  avals_in = [*res_avals, *[v.aval for v in jaxpr_staged.invars[len(which):]]]
  jaxpr_staged, _, () = pe.trace_to_jaxpr_dynamic(
      lu.wrap_init(staged, debug_info=jaxpr_staged.debug_info), avals_in)
  return jaxpr_known, jaxpr_staged
def _pe_custom_params(unks_in, inst_in, kept_outs_known, kept_outs_staged,
                      in_fwd, out_fwd, out_res_specs_known, staged_in_res_specs,
                      params_known, params_staged):
  """Build shard_map params for the known/staged eqns of the partial-eval rule.

  Prunes input/output specs to match the pruned jaxprs, appends residual
  output specs to the known eqn, and prepends residual input specs to the
  staged eqn — reusing the spec of the forwarded input/output for residuals
  that are forwarded rather than freshly produced.
  """
  # prune inputs to jaxpr_known according to unks_in
  in_specs_known, _ = partition_list(unks_in, params_known['in_specs'])
  _, out_specs_known = partition_list(kept_outs_known, params_known['out_specs'])
  out_specs_known = out_specs_known + out_res_specs_known
  assert len(out_specs_known) == len(params_known['jaxpr'].outvars)
  new_params_known = dict(params_known, in_specs=tuple(in_specs_known),
                          out_specs=tuple(out_specs_known))
  # added num_res new inputs to jaxpr_staged, pruning according to inst_in
  _, in_specs_staged = partition_list(inst_in, params_staged['in_specs'])
  # Residual specs: forwarded-from-input, forwarded-from-output, or fresh.
  iter_staged = iter(staged_in_res_specs)
  res_specs = [in_specs_known[f1] if f1 is not None else
               out_specs_known[f2] if f2 is not None else
               next(iter_staged) for f1, f2 in zip(in_fwd, out_fwd)]
  in_specs_staged = res_specs + in_specs_staged
  _, out_specs_staged = partition_list(kept_outs_staged, params_staged['out_specs'])
  new_params_staged = dict(params_staged, in_specs=tuple(in_specs_staged),
                           out_specs=tuple(out_specs_staged))
  return new_params_known, new_params_staged
# TODO(mattjj): remove this mechanism when we revise mesh scopes
def _all_mesh_names_except_spmd(
    mesh: Mesh, manual_axes: frozenset[AxisName]) -> tuple[AxisName, ...]:
  """Manual axis names of `mesh`, excluding any vmap spmd_axis_names in scope."""
  spmd_names = core.get_axis_env().spmd_axis_names
  result = []
  for name in mesh.axis_names:
    if name in manual_axes and name not in spmd_names:
      result.append(name)
  return tuple(result)
def _all_newly_manual_mesh_names(
    mesh: BaseMesh, manual_axes: frozenset[AxisName]) -> tuple[AxisName, ...]:
  """Axis names in `manual_axes` that are newly manual here.

  Excludes vmap spmd axis names and axes that are already manual in the
  enclosing context; when an abstract mesh is in scope it takes precedence
  over the `mesh` argument.
  """
  axis_env = core.get_axis_env()
  excluded = set(axis_env.spmd_axis_names)
  ctx_mesh = get_abstract_mesh()
  if ctx_mesh.empty:
    # TODO(mattjj): remove this mechanism when we revise mesh scopes
    excluded |= set(axis_env.axis_sizes)  # may include vmap axis_names
  else:
    mesh = ctx_mesh
    excluded |= set(ctx_mesh.manual_axes)
  return tuple(name for name in mesh.axis_names
               if name in manual_axes and name not in excluded)
# DCE
# TODO(mattjj): de-duplicate with pe.dce_jaxpr_call_rule, and/or _pmap_dce_rule?
def _shard_map_dce(used_outputs: list[bool], eqn: core.JaxprEqn
                   ) -> tuple[list[bool], core.JaxprEqn | None]:
  """Dead-code-elimination rule for shard_map_p.

  DCEs the inner jaxpr given which outputs are used, then rebuilds the eqn
  with pruned invars/outvars and matching in/out specs. Returns which inputs
  remain used, and the new eqn (or None if the whole eqn is dead).
  """
  if not any(used_outputs) and not pe.has_effects(eqn):
    return [False] * len(eqn.invars), None
  mesh = eqn.params["mesh"]
  manual_axes = eqn.params["manual_axes"]
  check_vma = eqn.params["check_vma"]
  # DCE the inner jaxpr under the manual-mesh axis environment.
  with (_extend_axis_env(mesh, manual_axes), config._check_vma(check_vma),
        use_abstract_mesh(_as_manual_mesh(mesh, manual_axes))):
    jaxpr, used_inputs = pe.dce_jaxpr(eqn.params['jaxpr'], used_outputs)
  if not any(used_inputs) and not any(used_outputs) and not jaxpr.effects:
    return used_inputs, None
  else:
    # Keep only the specs of surviving inputs/outputs.
    _, in_specs = partition_list(used_inputs, eqn.params['in_specs'])
    _, out_specs = partition_list(used_outputs, eqn.params['out_specs'])
    new_params = dict(eqn.params, jaxpr=jaxpr, in_specs=tuple(in_specs),
                      out_specs=tuple(out_specs))
    effs = core.filter_named_axis_effects(jaxpr.effects, mesh.axis_names)
    new_eqn = pe.new_jaxpr_eqn(
        [v for v, used in zip(eqn.invars, used_inputs) if used],
        [x for x, used in zip(eqn.outvars, used_outputs) if used],
        eqn.primitive, new_params, effs, eqn.source_info, eqn.ctx)
    return used_inputs, new_eqn
pe.dce_rules[shard_map_p] = _shard_map_dce
# Mutable arrays / refs
@discharge.register_discharge_rule(shard_map_p)
def _shard_map_discharge(
    in_avals, out_avals, *args, jaxpr, mesh, in_specs, out_specs, check_vma,
    manual_axes):
  """State-discharge rule for shard_map_p.

  Discharges refs in the inner jaxpr, runs the result as a shard_map that
  additionally returns the final ref values, and scatters those values back
  into the positions of the ref inputs (None for non-ref inputs).
  """
  inner_mesh = _as_manual_mesh(mesh, manual_axes)
  with (_extend_axis_env(mesh, manual_axes), use_abstract_mesh(inner_mesh),
        config._check_vma(check_vma)):
    discharged_jaxpr, discharged_consts = discharge.discharge_state(jaxpr, ())
  if discharged_consts: raise NotImplementedError
  del discharged_consts
  # Final ref values are emitted with the same specs as the ref inputs.
  ref_specs = [spec for spec, invar in zip(in_specs, jaxpr.invars)
               if isinstance(invar.aval, AbstractRef)]
  params = dict(jaxpr=discharged_jaxpr, out_specs=(*out_specs, *ref_specs))
  [f], params_ = shard_map_p.get_bind_params(params)
  discharged_out_specs, = params_.values()
  out_and_ref_vals = shard_map_p.bind(
      f, *args, mesh=mesh, in_specs=in_specs, manual_axes=manual_axes,
      out_specs_thunk=discharged_out_specs, check_vma=check_vma)
  out_vals, ref_vals = split_list(out_and_ref_vals, [len(jaxpr.outvars)])
  # Map each final ref value back to the input slot it came from.
  ref_vals_ = iter(ref_vals)
  new_invals = [next(ref_vals_) if isinstance(a, AbstractRef) else None
                for a in in_avals]
  assert next(ref_vals_, None) is None
  return new_invals, out_vals
| ShardMapTracer |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/node.py | {
"start": 1874,
"end": 2795
} | class ____(IHaveNew):
name: str
dagster_type_key: str
description: Optional[str]
is_required: bool
metadata: Mapping[str, MetadataValue]
is_dynamic: bool
def __new__(
cls,
name: str,
dagster_type_key: str,
description: Optional[str],
is_required: bool,
metadata: Optional[Mapping[str, MetadataValue]] = None,
is_dynamic: bool = False,
):
return super().__new__(
cls,
name=name,
dagster_type_key=dagster_type_key,
description=description,
is_required=is_required,
metadata=normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str), allow_invalid=True
),
is_dynamic=is_dynamic,
)
@whitelist_for_serdes(storage_field_names={"mapped_node_name": "mapped_solid_name"})
@record
| OutputDefSnap |
python | bokeh__bokeh | src/bokeh/models/widgets/tables.py | {
"start": 29667,
"end": 30313
} | class ____(Model):
'''Describes how to calculate totals and sub-totals
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
getter = String('', help="""
References the column which generates the unique keys of this sub-total (groupby).
""")
aggregators = List(Instance(RowAggregator), help="""
Describes how to aggregate the columns which will populate this sub-total.
""")
collapsed = Bool(False, help="""
Whether the corresponding sub-total is expanded or collapsed by default.
""")
| GroupingInfo |
python | getsentry__sentry | tests/sentry/uptime/subscriptions/test_tasks.py | {
"start": 15701,
"end": 17138
} | class ____(UptimeTestCase):
def test_create_update_delete(self) -> None:
for status in (
UptimeSubscription.Status.CREATING,
UptimeSubscription.Status.UPDATING,
UptimeSubscription.Status.DELETING,
):
sub = self.create_uptime_subscription(
status=status,
date_updated=timezone.now() - (SUBSCRIPTION_STATUS_MAX_AGE * 2),
url=f"http://sentry{status}.io",
region_slugs=["default"],
)
sub_new = self.create_uptime_subscription(
status=status, date_updated=timezone.now(), url=f"http://santry{status}.io"
)
with self.tasks():
subscription_checker()
if status == UptimeSubscription.Status.DELETING:
with pytest.raises(UptimeSubscription.DoesNotExist):
sub.refresh_from_db()
sub_new.refresh_from_db()
assert sub_new.status == status.value
assert sub_new.subscription_id is None
else:
sub.refresh_from_db()
assert sub.status == UptimeSubscription.Status.ACTIVE.value
assert sub.subscription_id is not None
sub_new.refresh_from_db()
assert sub_new.status == status.value
assert sub_new.subscription_id is None
| SubscriptionCheckerTest |
python | mwaskom__seaborn | tests/test_statistics.py | {
"start": 399,
"end": 793
} | class ____:
@pytest.fixture
def x(self, rng):
return rng.normal(0, 1, 100)
@pytest.fixture
def x2(self, rng):
return rng.normal(0, 1, 742) # random value to avoid edge cases
@pytest.fixture
def y(self, rng):
return rng.normal(0, 5, 100)
@pytest.fixture
def weights(self, rng):
return rng.uniform(0, 5, 100)
| DistributionFixtures |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 3088,
"end": 3237
} | class ____:
def m[S](self: S, other: S) -> int: ...
@classmethod
def n[S](cls: type[S], other: S) -> int: ...
| SelfNotUsedInReturnAnnotation |
python | gevent__gevent | src/greentest/3.12/test_threading.py | {
"start": 70373,
"end": 71748
} | class ____(unittest.TestCase):
def test__all__(self):
restore_default_excepthook(self)
extra = {"ThreadError"}
not_exported = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, not_exported=not_exported)
@requires_subprocess()
def test_gh112826_missing__thread__is_main_interpreter(self):
with os_helper.temp_dir() as tempdir:
modname = '_thread_fake'
import os.path
filename = os.path.join(tempdir, modname + '.py')
with open(filename, 'w') as outfile:
outfile.write("""if True:
import _thread
globals().update(vars(_thread))
del _is_main_interpreter
""")
expected_output = b'success!'
_, out, err = assert_python_ok("-c", f"""if True:
import sys
sys.path.insert(0, {tempdir!r})
import {modname}
sys.modules['_thread'] = {modname}
del sys.modules[{modname!r}]
import threading
print({expected_output.decode('utf-8')!r}, end='')
""")
self.assertEqual(out, expected_output)
self.assertEqual(err, b'')
| MiscTestCase |
python | Delgan__loguru | loguru/_colorizer.py | {
"start": 1145,
"end": 1477
} | class ____:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
| Fore |
python | scipy__scipy | scipy/signal/tests/test_ltisys.py | {
"start": 10695,
"end": 16421
} | class ____:
def check_matrix_shapes(self, p, q, r):
ss2tf(np.zeros((p, p)),
np.zeros((p, q)),
np.zeros((r, p)),
np.zeros((r, q)), 0)
def test_shapes(self):
# Each tuple holds:
# number of states, number of inputs, number of outputs
for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]:
self.check_matrix_shapes(p, q, r)
def test_basic(self):
# Test a round trip through tf2ss and ss2tf.
b = np.array([1.0, 3.0, 5.0])
a = np.array([1.0, 2.0, 3.0])
A, B, C, D = tf2ss(b, a)
xp_assert_close(A, [[-2., -3], [1, 0]], rtol=1e-13)
xp_assert_close(B, [[1.], [0]], rtol=1e-13)
xp_assert_close(C, [[1., 2]], rtol=1e-13)
xp_assert_close(D, [[1.]], rtol=1e-14)
bb, aa = ss2tf(A, B, C, D)
xp_assert_close(bb[0], b, rtol=1e-13)
xp_assert_close(aa, a, rtol=1e-13)
def test_zero_order_round_trip(self):
# See gh-5760
tf = (2, 1)
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[0.]], rtol=1e-13)
xp_assert_close(B, [[0.]], rtol=1e-13)
xp_assert_close(C, [[0.]], rtol=1e-13)
xp_assert_close(D, [[2.]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[2., 0]], rtol=1e-13)
xp_assert_close(den, [1., 0], rtol=1e-13)
tf = ([[5], [2]], 1)
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[0.]], rtol=1e-13)
xp_assert_close(B, [[0.]], rtol=1e-13)
xp_assert_close(C, [[0.], [0]], rtol=1e-13)
xp_assert_close(D, [[5.], [2]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[5., 0], [2, 0]], rtol=1e-13)
xp_assert_close(den, [1., 0], rtol=1e-13)
def test_simo_round_trip(self):
# See gh-5753
tf = ([[1, 2], [1, 1]], [1, 2])
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[-2.]], rtol=1e-13)
xp_assert_close(B, [[1.]], rtol=1e-13)
xp_assert_close(C, [[0.], [-1.]], rtol=1e-13)
xp_assert_close(D, [[1.], [1.]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[1., 2.], [1., 1.]], rtol=1e-13)
xp_assert_close(den, [1., 2.], rtol=1e-13)
tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1])
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[-1., -1.], [1., 0.]], rtol=1e-13)
xp_assert_close(B, [[1.], [0.]], rtol=1e-13)
xp_assert_close(C, [[-1., 0.], [0., 0.]], rtol=1e-13)
xp_assert_close(D, [[1.], [1.]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[1., 0., 1.], [1., 1., 1.]], rtol=1e-13)
xp_assert_close(den, [1., 1., 1.], rtol=1e-13)
tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4])
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[-2., -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13)
xp_assert_close(B, [[1.], [0], [0]], rtol=1e-13)
xp_assert_close(C, [[1., 2, 3], [1, 2, 3]], rtol=1e-13)
xp_assert_close(D, [[0.], [0]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[0., 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13)
xp_assert_close(den, [1., 2, 3, 4], rtol=1e-13)
tf = (np.array([1, [2, 3]], dtype=object), [1, 6])
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[-6.]], rtol=1e-31)
xp_assert_close(B, [[1.]], rtol=1e-31)
xp_assert_close(C, [[1.], [-9]], rtol=1e-31)
xp_assert_close(D, [[0.], [2]], rtol=1e-31)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[0., 1], [2, 3]], rtol=1e-13)
xp_assert_close(den, [1., 6], rtol=1e-13)
tf = (np.array([[1, -3], [1, 2, 3]], dtype=object), [1, 6, 5])
A, B, C, D = tf2ss(*tf)
xp_assert_close(A, [[-6., -5], [1, 0]], rtol=1e-13)
xp_assert_close(B, [[1.], [0]], rtol=1e-13)
xp_assert_close(C, [[1., -3], [-4, -2]], rtol=1e-13)
xp_assert_close(D, [[0.], [1]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[0., 1, -3], [1, 2, 3]], rtol=1e-13)
xp_assert_close(den, [1., 6, 5], rtol=1e-13)
def test_all_int_arrays(self):
A = [[0, 1, 0], [0, 0, 1], [-3, -4, -2]]
B = [[0], [0], [1]]
C = [[5, 1, 0]]
D = [[0]]
num, den = ss2tf(A, B, C, D)
xp_assert_close(num, [[0.0, 0.0, 1.0, 5.0]], rtol=1e-13, atol=1e-14)
xp_assert_close(den, [1.0, 2.0, 4.0, 3.0], rtol=1e-13)
def test_multioutput(self):
# Regression test for gh-2669.
# 4 states
A = np.array([[-1.0, 0.0, 1.0, 0.0],
[-1.0, 0.0, 2.0, 0.0],
[-4.0, 0.0, 3.0, 0.0],
[-8.0, 8.0, 0.0, 4.0]])
# 1 input
B = np.array([[0.3],
[0.0],
[7.0],
[0.0]])
# 3 outputs
C = np.array([[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[8.0, 8.0, 0.0, 0.0]])
D = np.array([[0.0],
[0.0],
[1.0]])
# Get the transfer functions for all the outputs in one call.
b_all, a = ss2tf(A, B, C, D)
# Get the transfer functions for each output separately.
b0, a0 = ss2tf(A, B, C[0], D[0])
b1, a1 = ss2tf(A, B, C[1], D[1])
b2, a2 = ss2tf(A, B, C[2], D[2])
# Check that we got the same results.
xp_assert_close(a0, a, rtol=1e-13)
xp_assert_close(a1, a, rtol=1e-13)
xp_assert_close(a2, a, rtol=1e-13)
xp_assert_close(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14)
| TestSS2TF |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/managed_kafka.py | {
"start": 1774,
"end": 4031
} | class ____(GoogleCloudBaseOperator):
"""
Base class for Managed Kafka operators.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"gcp_conn_id",
"project_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
location: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@cached_property
def hook(self) -> ManagedKafkaHook:
return ManagedKafkaHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
| ManagedKafkaBaseOperator |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/base_aws.py | {
"start": 1134,
"end": 3914
} | class ____(BaseOperator, AwsBaseHookMixin[AwsHookType]):
"""
Base AWS (Amazon) Operator Class to build operators on top of AWS Hooks.
.. warning::
Only for internal usage, this class might be changed, renamed or removed in the future
without any further notice.
Examples:
.. code-block:: python
from airflow.providers.amazon.aws.hooks.foo_bar import FooBarThinHook, FooBarThickHook
class AwsFooBarOperator(AwsBaseOperator[FooBarThinHook]):
aws_hook_class = FooBarThinHook
def execute(self, context):
pass
class AwsFooBarOperator2(AwsBaseOperator[FooBarThickHook]):
aws_hook_class = FooBarThickHook
def __init__(self, *, spam: str, **kwargs):
super().__init__(**kwargs)
self.spam = spam
@property
def _hook_parameters(self):
return {**super()._hook_parameters, "spam": self.spam}
def execute(self, context):
pass
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
:meta private:
"""
template_fields: Sequence[str] = aws_template_fields()
def __init__(
self,
*,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
verify: bool | str | None = None,
botocore_config: dict | None = None,
region: str | None | ArgNotSet = NOTSET, # Required for `.partial` signature check
**kwargs,
):
additional_params = {} if region is NOTSET else {"region": region}
hook_params = AwsHookParams.from_constructor(
aws_conn_id, region_name, verify, botocore_config, additional_params=additional_params
)
super().__init__(**kwargs)
self.aws_conn_id = hook_params.aws_conn_id
self.region_name = hook_params.region_name
self.verify = hook_params.verify
self.botocore_config = hook_params.botocore_config
self.validate_attributes()
| AwsBaseOperator |
python | optuna__optuna | optuna/visualization/_edf.py | {
"start": 648,
"end": 727
} | class ____(NamedTuple):
study_name: str
y_values: np.ndarray
| _EDFLineInfo |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 28696,
"end": 29080
} | class ____(RoleImpl):
__slots__ = ()
def _literal_coercion(self, element, *, argname=None, **kw):
if element is None:
return elements.Null()
elif element is False:
return elements.False_()
elif element is True:
return elements.True_()
else:
self._raise_for_expected(element, argname)
| ConstExprImpl |
python | huggingface__transformers | tests/models/biogpt/test_modeling_biogpt.py | {
"start": 1340,
"end": 10708
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, token_type_ids, *args):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, token_type_ids, *args):
model = BioGptModel(config=config).to(torch_device).eval()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, token_type_ids, *args, gradient_checkpointing=False
):
model = BioGptForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_biogpt_weight_initialization(self, config, *args):
model = BioGptModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, token_type_ids, *args):
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| BioGptModelTester |
python | patrick-kidger__equinox | equinox/nn/_rnn.py | {
"start": 280,
"end": 3786
} | class ____(Module):
"""A single step of a Gated Recurrent Unit (GRU).
!!! example
This is often used by wrapping it into a `jax.lax.scan`. For example:
```python
class Model(Module):
cell: GRUCell
def __init__(self, **kwargs):
self.cell = GRUCell(**kwargs)
def __call__(self, xs):
scan_fn = lambda state, input: (self.cell(input, state), None)
init_state = jnp.zeros(self.cell.hidden_size)
final_state, _ = jax.lax.scan(scan_fn, init_state, xs)
return final_state
```
"""
weight_ih: Array
weight_hh: Array
bias: Array | None
bias_n: Array | None
input_size: int = field(static=True)
hidden_size: int = field(static=True)
use_bias: bool = field(static=True)
def __init__(
self,
input_size: int,
hidden_size: int,
use_bias: bool = True,
dtype=None,
*,
key: PRNGKeyArray,
):
"""**Arguments:**
- `input_size`: The dimensionality of the input vector at each time step.
- `hidden_size`: The dimensionality of the hidden state passed along between
time steps.
- `use_bias`: Whether to add on a bias after each update.
- `dtype`: The dtype to use for all weights and biases in this GRU cell.
Defaults to either `jax.numpy.float32` or `jax.numpy.float64` depending on
whether JAX is in 64-bit mode.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
"""
dtype = default_floating_dtype() if dtype is None else dtype
ihkey, hhkey, bkey, bkey2 = jrandom.split(key, 4)
lim = math.sqrt(1 / hidden_size)
ihshape = (3 * hidden_size, input_size)
self.weight_ih = default_init(ihkey, ihshape, dtype, lim)
hhshape = (3 * hidden_size, hidden_size)
self.weight_hh = default_init(hhkey, hhshape, dtype, lim)
if use_bias:
self.bias = default_init(bkey, (3 * hidden_size,), dtype, lim)
self.bias_n = default_init(bkey2, (hidden_size,), dtype, lim)
else:
self.bias = None
self.bias_n = None
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
@named_scope("eqx.nn.GRUCell")
def __call__(self, input: Array, hidden: Array, *, key: PRNGKeyArray | None = None):
"""**Arguments:**
- `input`: The input, which should be a JAX array of shape `(input_size,)`.
- `hidden`: The hidden state, which should be a JAX array of shape
`(hidden_size,)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
The updated hidden state, which is a JAX array of shape `(hidden_size,)`.
"""
if self.use_bias:
bias = cast(Array, self.bias)
bias_n = cast(Array, self.bias_n)
else:
bias = 0
bias_n = 0
igates = jnp.split(self.weight_ih @ input + bias, 3)
hgates = jnp.split(self.weight_hh @ hidden, 3)
reset = jnn.sigmoid(igates[0] + hgates[0])
inp = jnn.sigmoid(igates[1] + hgates[1])
new = jnn.tanh(igates[2] + reset * (hgates[2] + bias_n))
return new + inp * (hidden - new)
| GRUCell |
python | sphinx-doc__sphinx | sphinx/util/images.py | {
"start": 715,
"end": 4106
} | class ____(NamedTuple):
mimetype: str
charset: str
data: bytes
def get_image_size(filename: str | PathLike[str]) -> tuple[int, int] | None:
filename = Path(filename)
try:
size = imagesize.get(filename)
if size[0] == -1:
size = None
elif isinstance(size[0], float) or isinstance(size[1], float):
size = (int(size[0]), int(size[1]))
if size is None and PILLOW_AVAILABLE: # fallback to Pillow
with Image.open(filename) as im:
size = im.size
return size
except Exception:
return None
@overload
def guess_mimetype(filename: PathLike[str] | str, default: str) -> str: ...
@overload
def guess_mimetype(
filename: PathLike[str] | str, default: None = None
) -> str | None: ...
def guess_mimetype(
filename: PathLike[str] | str = '',
default: str | None = None,
) -> str | None:
filename = Path(filename)
ext = filename.suffix.lower()
if ext in mime_suffixes:
return mime_suffixes[ext]
if filename.exists():
try:
imgtype = _image_type_from_file(filename)
except ValueError:
pass
else:
return 'image/' + imgtype
return default
def get_image_extension(mimetype: str) -> str | None:
return _suffix_from_mime.get(mimetype)
def parse_data_uri(uri: str) -> DataURI | None:
if not uri.startswith('data:'):
return None
if ',' not in uri:
msg = 'malformed data URI'
raise ValueError(msg)
# data:[<MIME-type>][;charset=<encoding>][;base64],<data>
mimetype = 'text/plain'
charset = 'US-ASCII'
uri = uri[5:]
properties, _, data = uri.partition(',')
for prop in properties.split(';'):
if prop == 'base64':
pass # skip
elif prop.lower().startswith('charset='):
charset = prop[8:]
elif prop:
mimetype = prop.lower()
image_data = unquote_to_bytes(data) # data might be percent-encoded
if properties.endswith(';base64'):
image_data = base64.decodebytes(image_data)
return DataURI(mimetype, charset, image_data)
def _image_type_from_file(filename: PathLike[str] | str) -> str:
with open(filename, 'rb') as f:
header = f.read(32) # 32 bytes
# Bitmap
# https://en.wikipedia.org/wiki/BMP_file_format#Bitmap_file_header
if header.startswith(b'BM'):
return 'bmp'
# GIF
# https://en.wikipedia.org/wiki/GIF#File_format
if header.startswith((b'GIF87a', b'GIF89a')):
return 'gif'
# JPEG data
# https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format#File_format_structure
if header.startswith(b'\xff\xd8'):
return 'jpeg'
# Portable Network Graphics
# https://en.wikipedia.org/wiki/PNG#File_header
if header.startswith(b'\x89PNG\r\n\x1a\n'):
return 'png'
# Scalable Vector Graphics
# https://svgwg.org/svg2-draft/struct.html
if b'<svg' in header.lower():
return 'svg+xml'
# TIFF
# https://en.wikipedia.org/wiki/TIFF#Byte_order
if header.startswith((b'MM', b'II')):
return 'tiff'
# WebP
# https://en.wikipedia.org/wiki/WebP#Technology
if header.startswith(b'RIFF') and header[8:12] == b'WEBP':
return 'webp'
msg = 'Could not detect image type!'
raise ValueError(msg)
| DataURI |
python | dask__dask | dask/dataframe/dask_expr/_shuffle.py | {
"start": 4524,
"end": 6423
} | class ____(ShuffleBase):
"""Abstract shuffle class
Parameters
----------
frame: Expr
The DataFrame-like expression to shuffle.
partitioning_index: str, list
Column and/or index names to hash and partition by.
npartitions: int
Number of output partitions.
ignore_index: bool
Whether to ignore the index during this shuffle operation.
method: str or Callable
Label or callback function to convert a shuffle operation
to its necessary components.
options: dict
Algorithm-specific options.
index_shuffle : bool
Whether to perform the shuffle on the index.
"""
def _lower(self):
# Use `method` to decide how to compose a
# shuffle operation from concerete expressions
# Reduce partition count if necessary
frame = self.frame
npartitions_out = self.npartitions_out
method = self.method or get_default_shuffle_method()
if npartitions_out < frame.npartitions and method != "p2p":
frame = Repartition(frame, new_partitions=npartitions_out)
ops = [
self.partitioning_index,
self.npartitions_out,
self.ignore_index,
self.options,
self.original_partitioning_index,
]
if method == "p2p":
return P2PShuffle(frame, *ops)
elif method == "disk":
return DiskShuffle(frame, *ops)
elif method == "simple":
return SimpleShuffle(frame, *ops)
elif method == "tasks":
return TaskShuffle(frame, *ops)
else:
raise ValueError(f"{method} not supported")
def _is_numeric_cast_type(dtype):
return (
pd.api.types.is_numeric_dtype(dtype)
or isinstance(dtype, CategoricalDtype)
and pd.api.types.is_numeric_dtype(dtype.categories)
)
| Shuffle |
python | pytorch__pytorch | test/nn/test_pooling.py | {
"start": 19665,
"end": 86221
} | class ____(NNTestCase):
@expectedFailureMPS # No double, float shape prop does not work
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_zero_batch(self, dtype, device):
inp = torch.ones(0, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool1d(5).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool2d((5, 5)).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool3d((5, 5, 5)).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
# The tests are used to verify the functions raises errors for backward propagation
# when output_size = 0, in adaptive_{avg, max}_pool and its variants.
# These tests are explicitly written because ErrorInputs does not support backward calls
# Issue: https://github.com/pytorch/pytorch/issues/78868
@expectedFailureMPS # No double, float shape prop does not work
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.float32, torch.float64, torch.bfloat16, torch.float16)
def test_adaptive_pooling_empty_output_size(self, dtype, device):
error_msg = (
"Expected grad_output to have non-zero size for non-batch dimensions"
)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=True)
input = make_arg((1, 64, 10, 9))
output_size = 0
fns = (
nn.functional.adaptive_avg_pool2d,
nn.functional.adaptive_avg_pool3d,
nn.functional.adaptive_max_pool2d,
nn.functional.adaptive_max_pool3d,
)
for fn in fns:
with self.assertRaisesRegex(RuntimeError, error_msg):
fn(input, output_size).sum().backward()
fns2 = (
nn.functional.adaptive_avg_pool1d,
nn.functional.adaptive_max_pool1d,
)
input2 = make_arg((1, 64))
for fn in fns2:
with self.assertRaisesRegex(RuntimeError, error_msg):
fn(input2, output_size).sum().backward()
@expectedFailureMPS # Error message does not match
@onlyNativeDeviceTypes
def test_adaptive_avg_pooling_backward_fails(self, device):
grad_output = torch.randn(1, 2, 7, device=device)
input = torch.randn(1, 2, 3, 3, device=device)
with self.assertRaisesRegex(RuntimeError, "Expected dimensions"):
torch.ops.aten._adaptive_avg_pool2d_backward(grad_output, input)
grad_output = torch.randn(1, 2, 7, 7, device=device)
input = torch.randn(1, 2, 3, 3, 3, device=device)
with self.assertRaisesRegex(RuntimeError, "Expected dimensions"):
torch.ops.aten._adaptive_avg_pool3d_backward(grad_output, input)
@onlyNativeDeviceTypes
def test_adaptive_max_pooling_backward_fails(self, device):
grad_output = torch.randn(1, 2, 7, 7, device=device)
input = torch.randn(1, 2, 7, 7, device=device)
indices = torch.ones(1, 2, 3, 3, dtype=torch.long, device=device)
with self.assertRaisesRegex(RuntimeError, "expected sizes"):
torch.ops.aten.adaptive_max_pool2d_backward(grad_output, input, indices)
grad_output = torch.randn(1, 2, 7, 7, 7, device=device)
input = torch.randn(1, 2, 3, 3, 3, device=device)
indices = torch.ones(1, 2, 3, 3, dtype=torch.long, device=device)
with self.assertRaisesRegex(RuntimeError, "expected dimensions"):
torch.ops.aten.adaptive_max_pool3d_backward(grad_output, input, indices)
@expectedFailureMPS # Op not implemented
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_batch(self, device):
mod = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
inp = torch.ones(0, 16, 50, 32, device=device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
@expectedFailureMPS # Op not implemented
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_batch(self, device):
mod = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)).to(device)
inp = torch.ones(0, 16, 50, 32, 32, device=device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, 32, device=device)
mod(inp)
@expectedFailureMPS # Op not implemented
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_out_size(self, device):
mod = nn.FractionalMaxPool2d([2, 2], output_size=[0, 1])
inp = torch.rand([16, 50, 32, 32], device=device)
out = mod(inp)
self.assertEqual(out, torch.empty((16, 50, 0, 1), device=device))
@expectedFailureMPS # Op not implemented
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_out_size(self, device):
mod = nn.FractionalMaxPool3d([3, 2, 2], output_size=[0, 1, 1])
inp = torch.rand([16, 50, 32, 32], device=device)
out = mod(inp)
self.assertEqual(out, torch.empty((16, 0, 1, 1), device=device))
@expectedFailureMPS # Op not implemented
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_samples(self, device):
samples = torch.rand([0, 16, 2], device=device)
mod = nn.FractionalMaxPool2d(
[2, 2], output_size=[1, 1], _random_samples=samples
)
inp = torch.randn([0, 16, 32, 32], device=device)
out = mod(inp)
self.assertEqual(out, torch.empty((0, 16, 1, 1), device=device))
inp1 = torch.randn([1, 16, 32, 32], device=device)
with self.assertRaisesRegex(RuntimeError, "Expect _random_samples"):
mod(inp1)
@expectedFailureMPS # Op not implemented
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_samples(self, device):
samples = torch.rand([0, 16, 3], device=device)
mod = nn.FractionalMaxPool3d(
[3, 2, 2], output_size=[1, 1, 1], _random_samples=samples
)
inp = torch.randn([0, 16, 50, 32, 32], device=device)
out = mod(inp)
self.assertEqual(out, torch.empty((0, 16, 1, 1, 1), device=device))
inp1 = torch.randn([1, 16, 50, 32, 32], device=device)
with self.assertRaisesRegex(RuntimeError, "Expect _random_samples"):
mod(inp1)
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_errors(self, device):
samples = torch.rand([0, 16, 3], device=device)
with self.assertRaisesRegex(ValueError, "kernel_size must greater than 0"):
nn.FractionalMaxPool3d(0, output_size=[1, 1, 1], _random_samples=samples)
with self.assertRaisesRegex(ValueError, "kernel_size must greater than 0"):
nn.FractionalMaxPool3d(
[0, 0, 0], output_size=[1, 1, 1], _random_samples=samples
)
samples = torch.randn(1, 3, 10, 10, 10)
with self.assertRaisesRegex(RuntimeError, "too large relative to"):
nn.FractionalMaxPool3d(
kernel_size=9223372036854775803,
output_size=[1, 1, 1],
)(samples)
with self.assertRaisesRegex(ValueError, "kernel_size must greater than 0"):
nn.FractionalMaxPool3d(
kernel_size=-1,
output_size=[1, 1, 1],
)(samples)
@onlyNativeDeviceTypes
def test_MaxPool3d_errors(self, device):
samples = torch.randn(1, 3, 10, 10, 10)
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
nn.MaxPool3d(
kernel_size=9223372036854775803,
)(samples)
with self.assertRaisesRegex(
RuntimeError, "kernel size should be greater than zero"
):
nn.MaxPool3d(
kernel_size=-1,
)(samples)
@onlyNativeDeviceTypes
def test_MaxPool_zero_batch_dim(self, device):
inp = torch.randn(0, 16, 50, device=device)
mod = torch.nn.MaxPool1d(3, stride=2).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
# 1D is supposed to be okay with 0 numel() inputs so dont test
# error raising for that case.
inp = torch.randn(0, 16, 50, 32, device=device)
mod = torch.nn.MaxPool2d(3, stride=2).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
inp = torch.ones(0, 16, 50, 44, 31, device=device)
mod = torch.nn.MaxPool3d(3, stride=2).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.ones(1, 0, 50, 44, 31, device=device)
mod(inp)
@onlyNativeDeviceTypes
def test_MaxUnpool_zero_batch_dim(self, device):
pool = torch.nn.MaxPool1d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool1d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
output.requires_grad_(True)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool2d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
pool = torch.nn.MaxPool3d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool3d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
output.requires_grad_(True)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
@slowTest
@onlyNativeDeviceTypes
@skipCUDAIfRocm
@parametrize_test(
"module_name,module_size,output_size,test_index,should_error",
[
# Some tests are failing in trunk https://github.com/pytorch/pytorch/issues/103854
subtest(
("MaxUnpool2d", (2, 2), (1, 3, 4, 5), -1, True),
name="case1",
),
subtest(
("MaxUnpool2d", (2, 2), (1, 3, 4, 5), 2 * 2 * 4 * 5, True),
name="case2",
),
subtest(
("MaxUnpool2d", (2, 2), (1, 3, 4, 5), (2 * 2 * 4 * 5) - 1, False),
name="case3",
),
subtest(
("MaxUnpool2d", (2, 3), (2, 1, 4, 2), 2 * 3 * 4 * 2, True),
name="case4",
),
subtest(
("MaxUnpool2d", (2, 3), (2, 1, 4, 2), (2 * 3 * 4 * 2) - 1, False),
name="case5",
),
subtest(
("MaxUnpool3d", (2, 2, 2), (1, 3, 4, 5), -1, True),
name="case6",
),
subtest(
("MaxUnpool3d", (2, 2, 2), (1, 3, 4, 5), 2 * 2 * 2 * 3 * 4 * 5, True),
name="case7",
),
subtest(
(
"MaxUnpool3d",
(2, 2, 2),
(1, 3, 4, 5),
(2 * 2 * 2 * 3 * 4 * 5) - 1,
False,
),
name="case8",
),
subtest(
("MaxUnpool3d", (2, 2, 2), (2, 3, 4, 1), 2 * 2 * 2 * 3 * 4 * 1, True),
name="case9",
),
subtest(
(
"MaxUnpool3d",
(2, 2, 2),
(2, 3, 4, 1),
(2 * 2 * 2 * 3 * 4 * 1) - 1,
False,
),
name="case10",
),
],
)
def test_MaxUnpool_index_errors(
self, device, module_name, module_size, output_size, test_index, should_error
):
# NOTE: CUDA tests need to be run in a subprocess because they cause device asserts
if torch.device(device).type == "cuda":
error_msgs = {
"MaxUnpool2d": r"Assertion `maxind >= 0 && maxind < outputImageSize` failed",
"MaxUnpool3d": r"Assertion `index >= 0 && index < outputImageSize` failed",
}
script = f"""
import torch
unpool = torch.nn.{module_name}({module_size}).to('{device}')
output = torch.rand({output_size}, dtype=torch.float32, device='{device}')
indices = torch.zeros({output_size}, dtype=torch.int64, device='{device}')
indices.flatten()[0] = {test_index}
unpool(output, indices)
torch.cuda.synchronize()
"""
p = subprocess.run(
[sys.executable, "-c", script],
cwd=os.path.dirname(os.path.realpath(__file__)),
capture_output=True,
text=True,
)
output = p.stdout + "\n" + p.stderr
error_msg = error_msgs[module_name]
if should_error:
self.assertIn(error_msg, output, "The expected error was not found")
else:
self.assertNotIn("Error", output, "Should not have produced an error")
else:
module_class = getattr(torch.nn, module_name)
unpool = module_class(module_size).to(device)
output = torch.rand(output_size, dtype=torch.float32, device=device)
indices = torch.zeros(output_size, dtype=torch.int64, device=device)
indices.flatten()[0] = test_index
if should_error:
with self.assertRaisesRegex(
RuntimeError, r"Found an invalid max index:"
):
unpool(output, indices)
else:
unpool(output, indices)
# https://github.com/pytorch/pytorch/issues/163409
@onlyNativeDeviceTypes
def test_MaxUnpool_invalid_output_size(self, device):
input2d = torch.randn(1, 1, 1)
input3d = torch.randn(1, 1, 1, 1, 1)
unpool2d = torch.nn.MaxUnpool2d(())
unpool3d = torch.nn.MaxUnpool3d(())
with self.assertRaisesRegex(RuntimeError, "There should be exactly"):
unpool2d(input2d, torch.zeros_like(input2d, dtype=torch.int64))
with self.assertRaisesRegex(RuntimeError, "There should be exactly"):
unpool3d(input3d, torch.zeros_like(input3d, dtype=torch.int64))
@expectedFailureMPS
@onlyNativeDeviceTypes
def test_AdaptiveMaxPool_zero_batch_dim(self, device):
inp = torch.randn(0, 16, 50, device=device)
mod = torch.nn.AdaptiveMaxPool1d(3).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.randn(1, 0, 50, device=device)
mod(inp)
inp = torch.randn(0, 16, 50, 32, device=device)
mod = torch.nn.AdaptiveMaxPool2d(3).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
inp = torch.ones(0, 16, 50, 44, 31, device=device)
mod = torch.nn.AdaptiveMaxPool3d(3).to(device)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.ones(1, 0, 50, 44, 31, device=device)
mod(inp)
@onlyCPU
def test_LPPool1d_kernel_size_overflow_large(self, device):
avgpool = torch.nn.LPPool1d(
-1.38119e150, 7879455037536781369, ceil_mode=True
).to(device)
inp = torch.randn(3, 15, device=device)
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
avgpool(inp)
@onlyNativeDeviceTypes
def test_AvgPool2d_empty(self, device):
avgpool = torch.nn.AvgPool2d(3, stride=2).to(device)
inp = torch.randn(0, 16, 20, 32, device=device)
_test_module_empty_input(self, avgpool, inp, check_size=False)
clast_inp = torch.randn(0, 16, 20, 32, device=device).contiguous(
memory_format=torch.channels_last
)
_test_module_empty_input(self, avgpool, clast_inp, check_size=False)
# test with empty non-batch input
with self.assertRaisesRegex(RuntimeError, "3D or 4D"):
inp = torch.randn(16, 0, 20, 32, device=device)
avgpool(inp)
@parametrize_test("kernel", ["max", "avg"])
@parametrize_test("pooling_dims", [1, 2, 3])
def test_pooling_shape(self, device, kernel, pooling_dims):
"""Test the output shape calculation for pooling functions"""
if kernel == "max" and pooling_dims == 1:
# This case causes the process to abort, so need to skip it for now
self.skipTest("Skipping to avoid abort")
# Checks output shape against expected for 1D, 2D and 3D
def check(expected_out_shape, sizes, *args, **kwargs):
if hasattr(torch.nn.functional, f"{kernel}_pool{pooling_dims}d"):
op = getattr(torch.nn.functional, f"{kernel}_pool{pooling_dims}d")
t = torch.randn(sizes[: pooling_dims + 2], device=device)
self.assertEqual(
op(t, *args, **kwargs).shape, expected_out_shape[: pooling_dims + 2]
)
check(
(1, 1, 3, 3, 4),
(1, 1, 5, 6, 7),
kernel_size=1,
stride=2,
padding=0,
ceil_mode=True,
)
check(
(1, 1, 2, 3, 3),
(1, 1, 3, 4, 5),
kernel_size=2,
stride=2,
padding=1,
ceil_mode=False,
)
check(
(1, 1, 2, 3, 3),
(1, 1, 3, 4, 5),
kernel_size=2,
stride=2,
padding=1,
ceil_mode=True,
)
# Test case from issue https://github.com/pytorch/pytorch/issues/45357
x = torch.randn(1, 1, 6, 7, device=device)
y = torch.nn.functional.max_pool2d(
x, 1, stride=(2, 2), padding=0, ceil_mode=True
)
self.assertEqual(y.size(), (1, 1, 3, 4))
@onlyNativeDeviceTypes # TODO: fix on XLA
def test_adaptive_avg_pool2d_output_size_one(self, device):
def helper(size, memory_format):
x = torch.randint(
1, 10, size, dtype=torch.float, device=device, requires_grad=True
)
if memory_format == "non_contiguous":
x = x[::2, ::2, ::2, ::2]
else:
x = x.to(memory_format=memory_format)
net = torch.nn.AdaptiveAvgPool2d((1, 1))
out = net(x)
ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))
out.sum().backward() # make sure it doesn't crash
self.assertEqual(out, ref_out)
if memory_format == torch.channels_last:
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, c, c])
else:
self.assertTrue(out.is_contiguous())
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, 1, 1])
for mf in (torch.contiguous_format, torch.channels_last, "non_contiguous"):
helper((2, 3, 6, 6), mf)
@onlyNativeDeviceTypes
def test_adaptive_avg_pool3d_output_size_one(self, device):
x = torch.randn(
(2, 3, 6, 6, 6), dtype=torch.float, device=device, requires_grad=True
)
net = torch.nn.AdaptiveAvgPool3d(1)
out = net(x)
ref_out = x.contiguous().mean((-1, -2, -3)).view(out.shape)
out.sum().backward() # make sure it doesn't crash
self.assertEqual(out, ref_out)
self.assertTrue(out.is_contiguous())
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, 1, 1, 1])
@expectedFailureMPS # Runtime Error not raised for mps
@expectedFailureMeta # Runtime Error not raised for meta
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long)
def test_adaptive_pooling_no_suppot_input(self, device, dtype):
for numel in (2, 3):
for pool_type in ("Max", "Avg"):
cls_name = f"Adaptive{pool_type}Pool{numel}d"
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1), device=device).to(dtype)
with self.assertRaisesRegex(RuntimeError, "not implemented"):
module(input)
@expectedFailureMPS # TODO: fixme
@onlyNativeDeviceTypes
@gcIfJetson
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_avg_pool2d_nhwc(self, device, dtype):
def helper(
n,
c,
h,
w,
kernel_size,
stride=None,
count_include_pad=True,
divisor_override=None,
padding=0,
):
if stride is None:
stride = kernel_size
input = torch.randn(n, c, h, w, dtype=dtype, device=device)
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randn(
n,
c,
(h - kernel_size) // stride + 1,
(w - kernel_size) // stride + 1,
dtype=dtype,
device=device,
)
pool = torch.nn.AvgPool2d(
kernel_size,
stride=stride,
count_include_pad=count_include_pad,
divisor_override=divisor_override,
).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AvgPool2d(
kernel_size,
stride=stride,
count_include_pad=count_include_pad,
divisor_override=divisor_override,
).to(device)
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
helper(4, 8, 8, 8, 3)
helper(4, 8, 8, 8, 3, count_include_pad=False, padding=1)
helper(4, 8, 8, 8, 3, count_include_pad=False, padding=2, stride=2)
helper(4, 8, 8, 8, 3, divisor_override=42)
helper(4, 8, 8, 8, 7)
# ROCm 16GB MI25 hits OOM error. Clear caching allocator prior to running large subtest.
if TEST_WITH_ROCM and "cuda" in device:
torch.cuda.empty_cache()
helper(200, 512, 28, 28, 2)
helper(4, 8, 7, 7, 3, stride=1)
helper(4, 8, 7, 7, 3, padding=2, stride=1)
helper(10, 512, 31, 31, 3, stride=2)
helper(1, 129, 8, 8, 3, stride=2)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d_corner_cases(self, device, dtype):
def check(x, args, expected):
model = torch.nn.MaxPool1d(*args)
if isinstance(x, list):
x = torch.tensor(x, device=device, dtype=dtype)
expected = torch.tensor(expected, device=device, dtype=dtype)
self.assertEqual(model(x), expected)
# Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
check([[1]], (1, None, 0, 1, False, False), [[1]])
check([[1]], (2, None, 1, 2, False, False), [[float("-inf")]])
check(
[[1], [1]],
(2, None, 1, 2, False, False),
[[float("-inf")], [float("-inf")]],
)
check([[1, 2]], (2, 1, 1, 2, False, False), [[2, 1]])
check([[1, 2]], (2, 2, 1, 2, False, True), [[2, 2]])
    @onlyCPU
    @dtypes(torch.float, torch.double)
    @skipIfTorchDynamo("OOMs https://github.com/pytorch/pytorch/issues/111320")
    def test_max_pool1d(self, device, dtype):
        """Randomized sweep: max_pool1d must agree with its return_indices
        variant's values output."""
        # FIXME For now compare against max_pool1d with indices
        def check(x, *args, **kwargs):
            model = torch.nn.MaxPool1d(*args, **kwargs)
            ref_model = torch.nn.MaxPool1d(*args, **kwargs, return_indices=True)
            self.assertEqual(model(x), ref_model(x)[0])

        # Random sweep over sizes / kernels / strides / dilations / ceil_mode.
        sizes = [random.sample(range(8, 128), 3) for _ in range(3)]
        kernel_sizes = random.sample(range(1, 5), 3)
        strides = random.sample(range(1, 5), 3)
        dilations = random.sample(range(1, 5), 3)
        ceil_modes = [True, False]
        for size, kernel_size, stride, dilation, ceil_mode in itertools.product(
            sizes, kernel_sizes, strides, dilations, ceil_modes
        ):
            # Valid padding is at most floor(kernel_size / 2).
            padding = random.sample(range(math.floor(kernel_size / 2) + 1), 1)
            check(
                torch.randn(size, device=device, dtype=dtype),
                kernel_size,
                stride,
                padding,
                dilation,
                ceil_mode=ceil_mode,
            )

        # Non-contiguous test
        tensor = torch.randn(5, 151, 33, device=device, dtype=dtype)[::2, ::3, ::2]
        check(tensor, 3, 2, 1, 2, ceil_mode=True)
        check(tensor.transpose(1, 2), 3, 2, 1, 2, ceil_mode=True)
@onlyCUDA
@gcIfJetson
def test_max_pool2d(self, device):
def helper(n, c, h, w, ks):
x = torch.randn(
n, c, h, w, device="cuda", dtype=torch.float, requires_grad=True
)
ref_x = x.detach().clone().cpu().requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks)
y = pool(x)
ref_y = pool(ref_x)
y.sum().backward()
ref_y.sum().backward()
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, ref_x.grad)
helper(2, 8, 4, 4, ks=2)
helper(1, 100000, 32, 32, ks=4)
helper(1, 100000, 1, 4, ks=(1, 4)) # test for max_pool1d
    @expectedFailureMPS  # TODO: Fixme
    @onlyNativeDeviceTypes
    @dtypes(torch.half, torch.bfloat16, torch.float, torch.double)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @gcIfJetson
    def test_max_pool2d_nhwc(self, device, dtype):
        """MaxPool2d on channels_last input must match the contiguous
        reference in outputs, indices, memory format, and input gradients."""

        def helper(n, c, h, w, kernel_size, stride=None):
            # PyTorch convention: stride defaults to kernel_size.
            if stride is None:
                stride = kernel_size
            input = torch.randn(n, c, h, w, dtype=dtype, device=device)
            input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
            # Upstream gradient shaped for the pooled output (no padding).
            grad = torch.randn(
                n,
                c,
                (h - kernel_size) // stride + 1,
                (w - kernel_size) // stride + 1,
                dtype=dtype,
                device=device,
            )
            pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(
                device
            )
            # Contiguous twin of the same data is the reference path.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(
                device
            )
            out, ind = pool(input)
            out.backward(grad)
            ref_out, ref_ind = ref_pool(ref_input)
            ref_out.backward(ref_grad)
            # channels_last in -> channels_last out for both values and indices.
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_ind.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(ind, ref_ind)
            self.assertEqual(input.grad, ref_input.grad)

        helper(4, 8, 8, 8, 7)
        helper(200, 512, 28, 28, 2)
        helper(4, 8, 7, 7, 3, stride=1)
        helper(10, 512, 31, 31, 3, stride=2)
        helper(1, 129, 8, 8, 3, stride=2)
@onlyCPU
@dtypes(torch.int32, torch.int64)
def test_max_pool2d_corner_cases(self, device, dtype):
def check(x, args, expected, memory_format):
model = torch.nn.MaxPool2d(*args)
if isinstance(x, list):
x = torch.tensor(x, device=device, dtype=dtype).to(
memory_format=memory_format
)
expected = torch.tensor(expected, device=device, dtype=dtype).to(
memory_format=memory_format
)
self.assertEqual(model(x), expected)
# Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
check(
[[[[-1, -2], [-3, -4]]]],
(2, 2, 1, 2, False, True),
[[[[-4, -4], [-4, -4]]]],
torch.contiguous_format,
)
check(
[[[[-1, -2], [-3, -4]]]],
(2, 2, 1, 2, False, True),
[[[[-4, -4], [-4, -4]]]],
torch.channels_last,
)
    @expectedFailureMPS  # TODO: Fixme
    @onlyNativeDeviceTypes
    @dtypes(torch.half, torch.bfloat16, torch.float, torch.double)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @gcIfJetson
    def test_max_pool3d_ndhwc(self, device, dtype):
        """MaxPool3d on channels_last_3d input must match the contiguous
        reference, for both batched and unbatched (n=0 sentinel) inputs."""

        def helper(n, c, h, w, d, kernel_size, stride=None):
            # n == 0 is a sentinel meaning "no batch dimension": build a
            # batch-1 tensor first, then squeeze the batch dim away below.
            batch = n
            if not batch:
                batch = 1
            input = torch.randn(batch, c, d, h, w, dtype=dtype, device=device)
            input = input.contiguous(
                memory_format=torch.channels_last_3d
            ).requires_grad_()
            if not n:
                input = input.squeeze(0).detach().clone().requires_grad_()
            # Normalize kernel/stride to per-dimension triples.
            if isinstance(kernel_size, int):
                kernel_size = [kernel_size] * 3
            if stride is None:
                stride = kernel_size
            elif isinstance(stride, int):
                stride = [stride] * 3
            # Upstream gradient shaped for the pooled output (no padding).
            grad = torch.randn(
                batch,
                c,
                (d - kernel_size[0]) // stride[0] + 1,
                (h - kernel_size[1]) // stride[1] + 1,
                (w - kernel_size[2]) // stride[2] + 1,
                dtype=dtype,
                device=device,
            )
            grad = grad.contiguous(memory_format=torch.channels_last_3d)
            if not n:
                grad = grad.squeeze(0)
            pool = torch.nn.MaxPool3d(kernel_size, stride, return_indices=True).to(
                device
            )
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.MaxPool3d(kernel_size, stride, return_indices=True).to(
                device
            )
            out, ind = pool(input)
            out.backward(grad)
            ref_out, ref_ind = ref_pool(ref_input)
            ref_out.backward(ref_grad)
            # Unbatched (4-d) outputs need a batch dim re-added before the
            # channels_last_3d contiguity check applies.
            if len(out.shape) == 4:
                self.assertTrue(
                    out.unsqueeze(0).is_contiguous(memory_format=torch.channels_last_3d)
                )
            else:
                self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
            self.assertTrue(ref_out.is_contiguous())
            if len(ind.shape) == 4:
                self.assertTrue(
                    ind.unsqueeze(0).is_contiguous(memory_format=torch.channels_last_3d)
                )
            else:
                self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last_3d))
            self.assertTrue(ref_ind.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(ind, ref_ind)
            # Half precision accumulates more backward error, so relax tolerances.
            if dtype == torch.half:
                self.assertEqual(input.grad, ref_input.grad, atol=0.05, rtol=0.01)
            else:
                self.assertEqual(input.grad, ref_input.grad)

        helper(4, 8, 8, 8, 8, 7)
        helper(4, 8, 8, 8, 8, (5, 6, 7))
        helper(1, 8, 8, 8, 8, (5, 6, 7))
        helper(0, 6, 12, 13, 14, (5, 6, 7))
        helper(4, 8, 7, 7, 7, 3, stride=1)
        helper(10, 128, 19, 19, 19, 3, stride=2)
        helper(10, 128, 19, 19, 19, (1, 2, 3), stride=2)
        helper(1, 128, 19, 19, 19, (1, 2, 3), stride=2)
        helper(0, 128, 19, 19, 19, (1, 2, 3), stride=2)
        helper(1, 79, 4, 4, 4, 3, stride=2)
        helper(0, 79, 4, 4, 4, 3, stride=2)
    @onlyCPU
    @dtypes(torch.half, torch.bfloat16)
    def test_max_pool_bfloat16_half(self, device, dtype):
        """Reduced-precision MaxPool2d/3d must match a float32 run over the
        same values (after casting back down), including indices and grads."""

        def helper(shape, kernel_size, stride, memory_format, dtype):
            input = torch.randn(shape, dtype=dtype, device=device)
            input = input.to(memory_format=memory_format).requires_grad_()
            # 4-d shape -> MaxPool2d, otherwise MaxPool3d.
            if len(shape) == 4:
                pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(
                    device
                )
            else:
                pool = torch.nn.MaxPool3d(kernel_size, stride, return_indices=True).to(
                    device
                )
            # float32 reference over the same values.
            input2 = input.detach().clone().float().requires_grad_(True)
            out, ind = pool(input)
            out.sum().backward()
            out2, ind2 = pool(input2)
            out2.sum().backward()
            self.assertTrue(out.is_contiguous(memory_format=memory_format))
            self.assertEqual(out.dtype, dtype)
            self.assertEqual(input.grad.dtype, dtype)
            self.assertEqual(out, out2.to(dtype=dtype))
            self.assertEqual(ind, ind2)
            self.assertEqual(input.grad, input2.grad.to(dtype=dtype))

        helper((4, 30, 8, 8), 7, 1, torch.contiguous_format, dtype)
        helper((4, 65, 8, 8), 7, 1, torch.channels_last, dtype)
        helper((1, 19, 20, 10), 8, 2, torch.contiguous_format, dtype)
        helper((1, 19, 20, 10), 8, 2, torch.channels_last, dtype)
        # NOTE(review): the next two calls exactly duplicate the first two
        # above — possibly intended to use different shapes; confirm intent.
        helper((4, 30, 8, 8), 7, 1, torch.contiguous_format, dtype)
        helper((4, 65, 8, 8), 7, 1, torch.channels_last, dtype)
        helper((1, 19, 10, 10, 10), 8, 2, torch.contiguous_format, dtype)
        helper((1, 19, 10, 9, 14), 8, 2, torch.channels_last_3d, dtype)
        helper((4, 10, 3, 8, 8), 3, 1, torch.contiguous_format, dtype)
        helper((4, 10, 8, 8, 8), 7, 1, torch.channels_last_3d, dtype)
@onlyCUDA
@gcIfJetson
def test_max_pool2d_indices(self, device):
def helper(n, c, h, w, ks):
if n is None:
x = torch.randn(
c, h, w, device="cuda", dtype=torch.float, requires_grad=True
)
else:
x = torch.randn(
n, c, h, w, device="cuda", dtype=torch.float, requires_grad=True
)
ref_x = x.detach().clone().cpu().requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks, return_indices=True)
y, idx = pool(x)
ref_y, ref_idx = pool(ref_x)
y.sum().backward()
ref_y.sum().backward()
self.assertEqual(y, ref_y)
self.assertEqual(
idx, ref_idx
) # assertEqual implicitly compares shape for tensors
self.assertEqual(x.grad, ref_x.grad)
helper(2, 8, 4, 4, ks=2)
helper(None, 3, 50, 50, ks=5)
    @onlyNativeDeviceTypes
    def test_max_pool2d_with_indices_backward_fails(self, device):
        """The raw aten backward op must raise on inconsistent argument
        shapes (asserted via the "Expected a tensor of dimension" error).
        """
        # grad_output here does not match the pooled-output shape implied by
        # `indices` — presumably the shape inconsistency is what trips the
        # dimension check; confirm against the kernel's validation logic.
        grad_output = torch.randn(1, 2, 7, 7, device=device)
        input = torch.randn(1, 2, 7, 7, device=device)
        indices = torch.ones(1, 2, 3, 3, dtype=torch.long, device=device)
        kernel_size = [3, 3]
        stride = [1, 1]
        padding = [1, 1]
        dilation = [1, 1]
        ceil_mode = False
        with self.assertRaisesRegex(RuntimeError, "Expected a tensor of dimension"):
            torch.ops.aten.max_pool2d_with_indices_backward(
                grad_output,
                input,
                kernel_size,
                stride,
                padding,
                dilation,
                ceil_mode,
                indices,
            )
def test_max_unpool_invalid_indices(self):
input = torch.randn(1, 1, 2, 2)
negative_indices = torch.tensor([[[[-1, 0], [0, 2]]]], dtype=torch.int64)
large_indices = torch.tensor([[[[10000, 10], [0, 2]]]], dtype=torch.int64)
output_size = (2, 2)
with self.assertRaisesRegex(RuntimeError, "Found an invalid max index"):
F.max_unpool2d(input, negative_indices, output_size)
with self.assertRaisesRegex(RuntimeError, "Found an invalid max index"):
F.max_unpool2d(input, large_indices, output_size)
input = torch.randn(1, 1, 2, 2, 2)
negative_indices = torch.tensor(
[[[[[-1, 10], [0, 2]], [[1, 3], [4, 5]]]]], dtype=torch.int64
)
large_indices = torch.tensor(
[[[[[10000, 10], [0, 2]], [[1, 3], [4, 5]]]]], dtype=torch.int64
)
output_size = (2, 2, 2)
with self.assertRaisesRegex(RuntimeError, "Found an invalid max index"):
F.max_unpool3d(input, negative_indices, output_size)
with self.assertRaisesRegex(RuntimeError, "Found an invalid max index"):
F.max_unpool3d(input, large_indices, output_size)
    @onlyCPU
    @dtypes(torch.half, torch.bfloat16)
    def test_avg_pool2d_reduced_floating(self, device, dtype):
        """Half/bfloat16 AvgPool2d must match a float32 run over the same
        values after casting the reference results back down."""

        def helper(n, c, h, w, kernel_size, stride, memory_format):
            # Sample in float32 first, then cast, so both precisions start
            # from the same underlying values.
            input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).to(
                dtype=dtype
            )
            input = input.to(memory_format=memory_format).requires_grad_()
            pool = torch.nn.AvgPool2d(kernel_size, stride).to(device)
            # float32 reference over the same values.
            input2 = input.detach().clone().float().requires_grad_(True)
            out = pool(input)
            out.sum().backward()
            out2 = pool(input2)
            out2.sum().backward()
            self.assertTrue(out.is_contiguous(memory_format=memory_format))
            self.assertEqual(out.dtype, dtype)
            self.assertEqual(input.grad.dtype, dtype)
            self.assertEqual(out, out2.to(dtype=dtype))
            self.assertEqual(input.grad, input2.grad.to(dtype=dtype))

        helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)
        helper(4, 65, 8, 8, 7, 1, torch.channels_last)
        helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)
        helper(1, 19, 20, 10, 8, 2, torch.channels_last)
    @dtypes(torch.float, torch.double)
    @dtypesIfMPS(torch.float)
    @expectedFailureMPS  # test_adaptive_pooling_max_nhwc currently fails on MPS - ISSUE#
    def test_adaptive_pooling_max_nhwc(self, device, dtype):
        """AdaptiveMaxPool2d/3d on channels_last(_3d) input must match the
        contiguous reference, for contiguous and channel-sliced inputs."""

        def helper(input_size, output_plane_size, contig):
            # 2 plane dims -> AdaptiveMaxPool2d + channels_last,
            # 3 plane dims -> AdaptiveMaxPool3d + channels_last_3d.
            n_plane_dims = len(output_plane_size)
            mod = (
                torch.nn.AdaptiveMaxPool2d
                if n_plane_dims == 2
                else torch.nn.AdaptiveMaxPool3d
            )
            channels_last = (
                torch.channels_last if n_plane_dims == 2 else torch.channels_last_3d
            )
            output_size = input_size[:2] + output_plane_size
            input = torch.randint(1, 10, input_size, device=device, dtype=dtype)
            input = input.contiguous(memory_format=channels_last)
            grad = torch.randint(1, 10, output_size, device=device, dtype=dtype)
            grad = grad.contiguous(memory_format=channels_last)
            if not contig:
                # Slice channels to exercise non-contiguous input.
                input = input[:, ::2]
                grad = grad[:, ::2]
            input.requires_grad_(True)
            pool = mod(output_plane_size, return_indices=True).to(device)
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = mod(output_plane_size, return_indices=True).to(device)
            out, ind = pool(input)
            out.backward(grad)
            ref_out, ref_ind = ref_pool(ref_input)
            ref_out.backward(ref_grad)
            # channels_last_3d case does not return channels_last_3d outputs
            if n_plane_dims == 2:
                self.assertTrue(out.is_contiguous(memory_format=channels_last))
                self.assertTrue(ind.is_contiguous(memory_format=channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertTrue(ref_ind.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(ind, ref_ind)
            self.assertEqual(input.grad, ref_input.grad)

        for contig in [True, False]:
            helper((4, 8, 10, 10), (7, 7), contig)
            helper((4, 8, 9, 14), (5, 8), contig)
            helper((4, 8, 11, 11), (1, 1), contig)
            helper((2, 1, 3, 3), (1, 1), contig)
            helper((4, 8, 10, 10, 10), (7, 7, 7), contig)
            helper((4, 8, 11, 11, 11), (1, 1, 1), contig)
            helper((2, 1, 3, 3, 3), (1, 1, 1), contig)
@dtypes(torch.float, torch.double)
@dtypesIfMPS(torch.float)
@expectedFailureMPS # test_pooling_max_nhwc currently fails on MPS - ISSUE#
def test_pooling_max_nhwc(self, device, dtype):
def helper(n, c, h, w, kernel_size, stride, padding, dilation, contig, device):
output_height = math.floor(
(h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1)
/ stride[0]
+ 1
)
output_width = math.floor(
(w + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1)
/ stride[1]
+ 1
)
input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)
input = input.contiguous(memory_format=torch.channels_last)
grad = torch.randint(
1, 10, (n, c, output_height, output_width), device=device, dtype=dtype
)
grad = grad.contiguous(memory_format=torch.channels_last)
if not contig:
input = input[:, ::2, :, :]
grad = grad[:, ::2, :, :]
input.requires_grad_(True)
pool = torch.nn.MaxPool2d(
kernel_size,
stride,
padding,
dilation,
return_indices=True,
ceil_mode=False,
)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.MaxPool2d(
kernel_size,
stride,
padding,
dilation,
return_indices=True,
ceil_mode=False,
).to(device)
out, ind = pool(input)
out.backward(grad)
ref_out, ref_ind = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_ind.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(ind, ref_ind)
self.assertEqual(input.grad, ref_input.grad)
for contig in [True, False]:
helper(4, 8, 10, 10, (2, 2), (1, 1), (1, 1), (2, 2), contig, device)
helper(4, 8, 9, 14, (2, 2), (1, 1), (1, 1), (2, 2), contig, device)
helper(4, 8, 11, 11, (4, 4), (2, 2), (2, 2), (2, 2), contig, device)
    @onlyCUDA
    def test_pool3d_size_one_feature_dim(self, device):
        """max/avg_pool3d must not crash, and must agree with CPU, when a
        size-1 dim carries arbitrary (meaningless) strides."""
        # Tests crazy strides for feature dim of size 1
        x = torch.randn(7, 1, 5, 3, 2, device=device)
        strange_strides = [30, 1234, 6, 2, 1]
        y = x.as_strided(x.size(), strange_strides)
        # CPU twin with the same strange strides as the reference.
        x = x.cpu().as_strided(x.size(), strange_strides)
        to_test = {
            "max_pool3d": lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
            "avg_pool3d": lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
        }
        for test, fn in to_test.items():
            # Should not crash
            out_y = fn(y)
            out_x = fn(x)
            self.assertEqual(out_y, out_x.to(device), msg=test)
    @onlyCUDA
    @largeTensorTest("18GB")
    @largeTensorTest("180GB", "cpu")
    def test_pool3d_large_size_int64(self, device):
        """max_pool3d on an input large enough to need 64-bit indexing must
        match the CPU float reference in forward and backward."""
        # See https://github.com/pytorch/pytorch/issues/52822
        x = torch.randn(
            70, 32, 100, 100, 100, dtype=torch.half, device=device, requires_grad=True
        )
        y = torch.nn.functional.max_pool3d(x, 5)
        g = torch.randn_like(y, dtype=torch.half)
        torch.cuda.synchronize()
        y.backward(g)
        torch.cuda.synchronize()
        ref_x = x.detach().cpu().float()  # max_pool3d_cpu is not implemented for half
        ref_x.requires_grad = True
        ref_g = g.cpu().float()
        ref_y = torch.nn.functional.max_pool3d(ref_x, 5)
        ref_y.backward(ref_g)
        # exact_dtype=False: half (CUDA) vs float (CPU reference).
        self.assertEqual(y, ref_y, exact_dtype=False)
        self.assertEqual(x.grad, ref_x.grad, exact_dtype=False)
    @onlyCUDA
    def test_AvgPool3d_backward_after_cat_dim1_device(self, device):
        """avg_pool3d backward must accept a grad tensor that is contiguous
        only because its batch dim has size 1 (inflated stride[0])."""
        # x has to have batch_size 1 to test contiguous checks
        x = torch.randn(1, 3, 4, 4, 4, device=device, requires_grad=True)
        y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)
        grad = torch.randn(y.size(), device=device)
        # increase the stride in dimension 0. the tensor is still contiguous because size[0] is 1
        stride = list(grad.stride())
        stride[0] = stride[0] * 2
        grad.set_(grad.storage(), 0, grad.size(), stride)
        assert grad.is_contiguous()
        # Must not raise despite the unusual stride pattern.
        y.backward(grad)
    def _test_maxpool_indices(
        self, num_dim, adaptive=False, device="cpu", dtype=torch.float
    ):
        """Shared driver for (Adaptive)MaxPool{1,2,3}d return_indices tests:
        checks forward values/indices against closed-form expectations (1-d
        and 2-d only), backward routing, the error after mutating indices,
        and -inf handling."""

        def expected_indices(dim, dtype):
            # Closed-form max positions for the arange input built below.
            if dim == 1:
                return torch.tensor([1, 3], dtype=dtype).repeat(2, 2, 1)
            if dim == 2:
                return torch.tensor([[5, 7], [13, 15]], dtype=dtype).repeat(2, 2, 1, 1)

        def expected_grad(dim, dtype):
            # Gradient is 1 at each selected max position, 0 elsewhere;
            # higher dims are built recursively from the 1-d pattern.
            if dim == 1:
                return torch.tensor([0, 1, 0, 1], dtype=dtype).repeat(2, 2, 1)
            grad = expected_grad(dim - 1, dtype=dtype)
            zero = torch.zeros(grad.size(), dtype=dtype)
            return torch.stack((zero, grad, zero, grad), 2)

        def expected_output(dim, dtype):
            # Closed-form pooled values for the arange input.
            if dim == 1:
                return torch.arange(2, 17, 2, dtype=dtype).view(2, 2, 2)
            if dim == 2:
                col = torch.arange(6, 63, 8, dtype=dtype)
                return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)

        if adaptive:
            cls_name = f"AdaptiveMaxPool{num_dim}d"
        else:
            # NOTE(review): FIXME(#105716) claimed this fails when using an
            # f-string, yet an f-string is used here — confirm the issue is
            # resolved or drop the stale reference.
            cls_name = f"MaxPool{num_dim}d"
        module_cls = getattr(nn, cls_name)
        module = module_cls(2, return_indices=True).to(device, dtype=dtype)
        numel = 4 ** (num_dim + 1)
        # Deterministic input: 1..numel reshaped to (2, 2, 4, ..., 4).
        input = (
            torch.arange(1, numel + 1)
            .view(2, 2, *repeat(4, num_dim))
            .to(device, dtype=dtype)
        )
        input_var = input.detach().clone().requires_grad_()

        # Check forward
        output, indices = module(input_var)
        # No closed-form expectation tables exist for the 3-d case.
        if num_dim != 3:
            expected_indices = expected_indices(num_dim, dtype=indices.data.dtype)
            expected_output = expected_output(num_dim, dtype=output.data.dtype)
            self.assertEqual(indices.dim(), input.dim())
            self.assertEqual(indices.data.squeeze(), expected_indices)
            self.assertEqual(output.data.squeeze(), expected_output)
        self.assertTrue(output.requires_grad)
        self.assertFalse(indices.requires_grad)

        # Make sure backward works
        grad_output = torch.ones(output.size(), device=device, dtype=dtype)
        output.backward(grad_output, retain_graph=True)
        expected_grad = expected_grad(num_dim, dtype=input_var.grad.data.dtype)
        self.assertEqual(input_var.grad.data, expected_grad.view_as(input))

        # Make sure backward after changing indices will result in an error
        indices.add_(1)
        self.assertRaises(RuntimeError, lambda: output.backward(grad_output))

        # Make sure -Infinity is handled correctly
        t = torch.tensor([[[float("-inf")]]])
        m = nn.MaxPool1d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0], 0)

        t = torch.tensor([[[float("-inf")]]])
        m = nn.MaxPool2d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0], 0)

        t = torch.tensor([[[[float("-inf")]]]])
        m = nn.MaxPool3d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0, 0], 0)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.float)
    def test_MaxPool1d_indices(self, device, dtype):
        """MaxPool1d return_indices correctness (via the shared driver)."""
        self._test_maxpool_indices(1, device=device, dtype=dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.float)
    def test_MaxPool2d_indices(self, device, dtype):
        """MaxPool2d return_indices correctness (via the shared driver)."""
        self._test_maxpool_indices(2, device=device, dtype=dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.float)
    def test_MaxPool3d_indices(self, device, dtype):
        """MaxPool3d return_indices correctness (via the shared driver)."""
        self._test_maxpool_indices(3, device=device, dtype=dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.float)
    def test_AdaptiveMaxPool1d_indices(self, device, dtype):
        """AdaptiveMaxPool1d return_indices correctness (shared driver)."""
        self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.float)
    def test_AdaptiveMaxPool2d_indices(self, device, dtype):
        """AdaptiveMaxPool2d return_indices correctness (shared driver)."""
        self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @expectedFailureMPS
    @dtypes(torch.float)
    def test_AdaptiveMaxPool3d_indices(self, device, dtype):
        """AdaptiveMaxPool3d return_indices correctness (shared driver)."""
        self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@expectedFailureMPS
@dtypes(torch.float)
def test_maxpool_indices_no_batch_dim(self, device, dtype):
"""Check that indices with no batch dim is consistent with a single batch."""
max_pool_cases = [
(
nn.MaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype),
),
(
nn.MaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype),
),
(
nn.MaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype),
),
(
nn.AdaptiveMaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype),
),
(
nn.AdaptiveMaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype),
),
(
nn.AdaptiveMaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype),
),
]
for module, input in max_pool_cases:
_, indices_no_batch = module(input)
_, indicies_single_batch = module(input.unsqueeze(0))
self.assertEqual(indices_no_batch, indicies_single_batch.squeeze(0))
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float)
    @expectedFailureMPS  # Exception not raise
    @onlyNativeDeviceTypes  # TODO: Fails on XLA
    @gcIfJetson
    def test_max_pool_nan_inf(self, device, dtype):
        """(adaptive_)max_pool{1,2,3}d must propagate NaN and -inf through
        forward (with and without autograd) and allow backward on them."""
        for adaptive in ["", "adaptive_"]:
            for num_dim in [1, 2, 3]:
                fn_name = f"{adaptive}max_pool{num_dim}d"
                fn = getattr(F, fn_name)

                # All-NaN input: pooled value must stay NaN.
                x = torch.full(
                    [1, 1] + num_dim * [3],
                    nan,
                    device=device,
                    dtype=dtype,
                    requires_grad=True,
                )
                # Adaptive variants take an output size (1), the others a
                # kernel size (3) covering the whole input.
                res = fn(x, 1 if adaptive else 3)
                res.backward(torch.randn_like(res))
                self.assertTrue(math.isnan(res.item()))
                x.requires_grad_(False)
                res = fn(x, 1 if adaptive else 3)
                self.assertTrue(math.isnan(res.item()))

                # All--inf input: pooled value must stay -inf.
                x2 = torch.full(
                    [1, 1] + num_dim * [3],
                    -inf,
                    device=device,
                    dtype=dtype,
                    requires_grad=True,
                )
                res2 = fn(x2, 1 if adaptive else 3)
                res2.backward(torch.randn_like(res2))
                self.assertTrue(math.isinf(res2.item()))
                x2.requires_grad_(False)
                res2 = fn(x2, 1 if adaptive else 3)
                self.assertTrue(math.isinf(res2.item()))
    @expectedFailureMPS  # float64
    @expectedFailureMeta  # RuntimeError: Unrecognized tensor type ID: Meta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool2d(self, device):
        """fractional_max_pool2d: output shape, gradcheck/gradgradcheck for
        batched and unbatched inputs, and kernel_size/output_size
        validation."""
        with set_default_dtype(torch.double):
            x = torch.randn(1, 2, 7, 7, requires_grad=True, device=device)
            samples = x.new(1, 2, 2).uniform_()

            def func(x):
                return F.fractional_max_pool2d(
                    x, (2, 2), output_size=(3, 3), _random_samples=samples
                )

            self.assertEqual(func(x).shape, (1, 2, 3, 3))
            gradcheck(func, [x])
            gradgradcheck(func, [x])

            # Unbatched (3-d) input.
            x = torch.randn(2, 7, 7, requires_grad=True, device=device)
            self.assertEqual(func(x).shape, (2, 3, 3))
            if self.device_type != "cuda":
                # Reference: https://github.com/pytorch/pytorch/issues/52427
                # Raises -> RuntimeError: TensorAccessor expected 4 dims but tensor has 3
                # on CUDA in gradcheck
                gradcheck(func, [x])
                gradgradcheck(func, [x])

            # Invalid kernel sizes must be rejected.
            for kernel_size in [(), (1,)]:
                with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                    # Incorrect kernel_size
                    F.fractional_max_pool2d(
                        x,
                        kernel_size=kernel_size,
                        output_size=(3, 3),
                        _random_samples=samples,
                    )

            # Invalid output sizes: too large per-dimension, or wrong arity.
            err_large_msg = "too large relative to input "
            err_out_size_msg = "output_size must either"
            for output_size, msg in [
                ((9, 3), err_large_msg + "height"),
                ((3, 9), err_large_msg + "width"),
                ((3,), err_out_size_msg),
                ((), err_out_size_msg),
            ]:
                with self.assertRaisesRegex(RuntimeError, msg):
                    # Incorrect output_size
                    F.fractional_max_pool2d(
                        x, (2, 2), output_size=output_size, _random_samples=samples
                    )
    @onlyNativeDeviceTypes
    def test_fractional_max_pool2d_backward_fails(self, device):
        """The raw backward op must reject a grad_output with unexpected
        sizes (here a 5-d tensor against a 4-d forward configuration)."""
        grad_output = torch.randn(1, 1, 2, 3, 3, device=device)
        input = torch.randn(1, 2, 7, 7, device=device)
        kernel_size = (2, 2)
        output_size = (3, 3)
        indices = torch.ones(1, 2, 3, 3, dtype=torch.long, device=device)
        with self.assertRaisesRegex(RuntimeError, "gradOutput sizes unexpected"):
            torch.ops.aten.fractional_max_pool2d_backward(
                grad_output, input, kernel_size, output_size, indices
            )
    @expectedFailureMPS  # float64
    @expectedFailureMeta  # RuntimeError: Unrecognized tensor type ID: Meta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool3d(self, device):
        """fractional_max_pool3d: output shape, gradcheck/gradgradcheck for
        batched and unbatched inputs, and kernel_size/output_size
        validation."""
        with set_default_dtype(torch.double):
            x = torch.randn(1, 2, 7, 7, 7, requires_grad=True, device=device)
            samples = x.new(1, 2, 3).uniform_()

            def func(x):
                return F.fractional_max_pool3d(
                    x, (2, 2, 2), output_size=(3, 3, 3), _random_samples=samples
                )

            self.assertEqual(func(x).shape, (1, 2, 3, 3, 3))
            gradcheck(func, [x])
            gradgradcheck(func, [x])

            # Unbatched (4-d) input.
            x = torch.randn(2, 7, 7, 7, requires_grad=True, device=device)
            self.assertEqual(func(x).shape, (2, 3, 3, 3))
            gradcheck(func, [x])
            gradgradcheck(func, [x])

            # Invalid kernel sizes must be rejected.
            for kernel_size in [(), (1,), (1, 1)]:
                with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                    # Incorrect kernel_size
                    F.fractional_max_pool3d(
                        x,
                        kernel_size=kernel_size,
                        output_size=(3, 3, 3),
                        _random_samples=samples,
                    )

            # Invalid output sizes: too large per-dimension, or wrong arity.
            err_large_msg = "too large relative to input "
            err_out_size_msg = "output_size must either"
            for output_size, msg in [
                ((9, 3, 3), err_large_msg + "time"),
                ((3, 9, 3), err_large_msg + "height"),
                ((3, 3, 9), err_large_msg + "width"),
                ((3, 3), err_out_size_msg),
                ((3,), err_out_size_msg),
                ((), err_out_size_msg),
            ]:
                with self.assertRaisesRegex(RuntimeError, msg):
                    # Incorrect output_size
                    F.fractional_max_pool3d(
                        x, (2, 2, 2), output_size=output_size, _random_samples=samples
                    )
@expectedFailureMPS # Not implemented
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyNativeDeviceTypes # TODO: Fails on XLA
def test_fractional_max_pool_nan_inf(self, device, dtype):
for num_dim in [2, 3]:
fn_name = f"FractionalMaxPool{num_dim}d"
fn = getattr(nn, fn_name)(kernel_size=2, output_size=1)
x = torch.full(
[1, 1] + num_dim * [3],
nan,
device=device,
dtype=dtype,
requires_grad=True,
)
res = fn(x)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x2 = torch.full(
[1, 1] + num_dim * [3],
-inf,
device=device,
dtype=dtype,
requires_grad=True,
)
res2 = fn(x2)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
    @expectedFailureMPS  # TODO: Fix me
    @onlyNativeDeviceTypes  # TODO: RuntimeError message different on XLA
    def test_pooling_zero_stride(self, device):
        """Both the functional and module forms of max/avg pooling must
        reject stride=0."""
        for op in ("max", "avg"):
            for num_dim in [1, 2, 3]:
                fn_name = f"{op}_pool{num_dim}d"
                fn = getattr(F, fn_name)
                x = torch.ones([1, 2] + num_dim * [4], device=device, dtype=torch.float)
                # Message varies between implementations; accept either form.
                self.assertRaisesRegex(
                    RuntimeError,
                    r"stride should not be zero|stride must be greater than zero",
                    lambda: fn(x, kernel_size=2, stride=0),
                )

                fn_module_name = f"{op.title()}Pool{num_dim}d"
                fn_module = getattr(nn, fn_module_name)(kernel_size=2, stride=0)
                self.assertRaisesRegex(
                    RuntimeError,
                    r"stride should not be zero|stride must be greater than zero",
                    lambda: fn_module(x),
                )
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.float)
    def test_pool_large_size(self, device, dtype):
        """Output sizes must be computed with exact integer math even for
        lengths not representable in float32."""
        for op in ("max", "avg"):
            for num_dim in [1, 2, 3]:
                fn_name = f"{op}_pool{num_dim}d"
                fn = getattr(F, fn_name)
                # 16777217 is the smallest integer not expressible in float32
                x = torch.ones(
                    [1, 1, 16777217] + (num_dim - 1) * [1], device=device, dtype=dtype
                )
                res = fn(x, 1, stride=1, padding=0)
                # check if the output shape was still computed correctly
                self.assertEqual(x.shape[2], res.shape[2])
    @onlyCUDA
    @largeTensorTest("6GB")
    def test_pooling_large(self, device):
        """Pooling modules must handle inputs with more than 2**31 - 1
        elements (32-bit indexing overflow regression check)."""

        def helper(pool):
            inp = torch.randn(
                2**7 + 10, 2**8, 2**8, 2**8, dtype=torch.half, device="cuda"
            )
            self.assertTrue(inp.numel() > 2**31 - 1)
            pool(inp)
            torch.cuda.synchronize()  # asserts test finishes normally without raising errors

        helper(nn.MaxPool2d(4, 4))
        helper(nn.AvgPool2d(4, 4))
        helper(nn.FractionalMaxPool2d(4, 4))
        helper(nn.AdaptiveMaxPool2d((2**6, 2**6)))
        helper(nn.AdaptiveAvgPool2d((2**6, 2**6)))
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @expectedFailureMPS
    @dtypes(torch.float)
    def test_pool_invalid_size(self, device, dtype):
        """Pooling must raise when the configuration would yield an empty
        (too small) output."""
        for op in ("max", "avg"):
            for num_dim in [1, 2, 3]:
                fn_name = f"{op}_pool{num_dim}d"
                if op == "max":
                    # New implementation without indices supports empty tensors
                    # TODO(Heitor) change once with_indices code is updated
                    fn_name += "_with_indices"
                fn = getattr(F, fn_name)
                # use a configuration that gives zero outputs only
                # when doing a correct floor division by the stride
                x = torch.ones([1, 1] + num_dim * [4], device=device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, r"too small|smaller than"):
                    try:
                        fn(x, 3, stride=2, padding=0, dilation=2)
                    except TypeError:
                        # some implementations do not support dilation
                        fn(x, 6, stride=2, padding=0)
@onlyCUDA
def test_pooling_bfloat16(self, device):
_test_bfloat16_ops(
self,
torch.nn.AvgPool1d(3, stride=2),
device,
inp_dims=(8, 4, 16),
prec=0.05,
)
_test_bfloat16_ops(
self,
torch.nn.AvgPool2d(3, stride=2),
device,
inp_dims=(8, 4, 16, 16),
prec=0.05,
)
_test_bfloat16_ops(
self,
torch.nn.AvgPool3d(3, stride=2),
device,
inp_dims=(8, 4, 16, 16, 16),
prec=0.05,
)
_test_bfloat16_ops(
self, torch.nn.AdaptiveAvgPool1d(3), device, inp_dims=(8, 4, 16), prec=0.05
)
_test_bfloat16_ops(
self,
torch.nn.AdaptiveAvgPool2d((3, 5)),
device,
inp_dims=(8, 4, 16, 16),
prec=0.05,
)
_test_bfloat16_ops(
self,
torch.nn.AdaptiveAvgPool3d((3, 5, 7)),
device,
inp_dims=(8, 4, 16, 16, 16),
prec=0.05,
)
    def test_maxpool3d_non_square_backward(self, device):
        """Backward of max_pool3d must reach every position of non-square
        inputs (regression for a CUDA launch-grid transposition bug)."""
        # previous CUDA routine of this backward calculates kernel launch grid size
        # with last two dimensions interchanged, so the tailing along the longer dim
        # get ignored. Here we test whether every position gets gradient.
        for dim in (2, 3, 4):
            shape = tuple(32 if i != dim else 256 for i in range(4))
            x = torch.randn(shape, device=device, requires_grad=True)
            F.max_pool3d(x, kernel_size=(1, 1, 1)).sum().backward()
            # 1x1x1 kernel => identity pooling => grad of sum is all ones.
            self.assertEqual(x.grad, torch.ones_like(x.grad))
    @slowTest
    def test_adaptive_pool_odd_size(self, device):
        """Adaptive avg/max pooling must run without error for awkward odd
        input/output size combinations (2-d and 3-d)."""
        # See https://github.com/pytorch/pytorch/issues/81409
        Ih, Iw, Oh, Ow = 5873, 3693, 3527, 2219
        imgs = torch.randint(low=0, high=256, size=(11, Ih, Iw), dtype=torch.float)
        _imgs = F.adaptive_avg_pool2d(imgs, (Oh, Ow))
        _imgs = F.adaptive_max_pool2d(imgs, (Oh, Ow))

        Id, Ih, Iw, Od, Oh, Ow = 3, 5873, 3693, 3, 3527, 2219
        imgs = torch.randint(low=0, high=256, size=(3, Id, Ih, Iw), dtype=torch.float)
        F.adaptive_avg_pool3d(imgs, (Od, Oh, Ow))
        F.adaptive_max_pool3d(imgs, (Od, Oh, Ow))
# Expand the generic device-type test class into concrete per-device /
# per-dtype test classes, and expand any @parametrize-d tests.
instantiate_device_type_tests(TestPoolingNNDeviceType, globals(), allow_mps=True)
instantiate_parametrized_tests(TestPoolingNN)

if __name__ == "__main__":
    run_tests()
| TestPoolingNNDeviceType |
python | walkccc__LeetCode | solutions/240. Search a 2D Matrix II/240.py | {
"start": 0,
"end": 309
} | class ____:
def searchMatrix(self, matrix: list[list[int]], target: int) -> bool:
r = 0
c = len(matrix[0]) - 1
while r < len(matrix) and c >= 0:
if matrix[r][c] == target:
return True
if target < matrix[r][c]:
c -= 1
else:
r += 1
return False
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/engines.py | {
"start": 13086,
"end": 13624
} | class ____:
"""Proxy a DBAPI connection.
Tests can provide subclasses of this to intercept
DBAPI-level connection operations.
"""
def __init__(self, engine, conn, cursor_cls):
self.conn = conn
self.engine = engine
self.cursor_cls = cursor_cls
def cursor(self, *args, **kwargs):
return self.cursor_cls(self.engine, self.conn, *args, **kwargs)
def close(self):
self.conn.close()
def __getattr__(self, key):
return getattr(self.conn, key)
| DBAPIProxyConnection |
python | kamyu104__LeetCode-Solutions | Python/find-servers-that-handled-most-number-of-requests.py | {
"start": 65,
"end": 1600
} | class ____(object):
def busiestServers(self, k, arrival, load):
"""
:type k: int
:type arrival: List[int]
:type load: List[int]
:rtype: List[int]
"""
count = [0]*k
min_heap_of_endtimes = []
min_heap_of_nodes_after_curr = []
min_heap_of_nodes_before_curr = range(k)
for i, (t, l) in enumerate(itertools.izip(arrival, load)):
if i % k == 0:
min_heap_of_nodes_before_curr, min_heap_of_nodes_after_curr = [], min_heap_of_nodes_before_curr
while min_heap_of_endtimes and min_heap_of_endtimes[0][0] <= t:
_, free = heapq.heappop(min_heap_of_endtimes)
if free < i % k:
heapq.heappush(min_heap_of_nodes_before_curr, free)
else:
heapq.heappush(min_heap_of_nodes_after_curr, free)
min_heap_of_candidates = min_heap_of_nodes_after_curr if min_heap_of_nodes_after_curr else min_heap_of_nodes_before_curr
if not min_heap_of_candidates:
continue
node = heapq.heappop(min_heap_of_candidates)
count[node] += 1
heapq.heappush(min_heap_of_endtimes, (t+l, node))
max_count = max(count)
return [i for i in xrange(k) if count[i] == max_count]
# Time: O(nlogk)
# Space: O(k)
import sortedcontainers # required to do pip install
import itertools
import heapq
# reference: http://www.grantjenks.com/docs/sortedcontainers/sortedlist.html
| Solution |
python | optuna__optuna | optuna/terminator/improvement/evaluator.py | {
"start": 7976,
"end": 10019
} | class ____(BaseImprovementEvaluator):
"""Evaluates the stagnation period of the best value in an optimization process.
This class is initialized with a maximum stagnation period (``max_stagnation_trials``)
and is designed to evaluate the remaining trials before reaching this maximum period
of allowed stagnation. If this remaining trials reach zero, the trial terminates.
Therefore, the default error evaluator is instantiated by ``StaticErrorEvaluator(const=0)``.
Args:
max_stagnation_trials:
The maximum number of trials allowed for stagnation.
"""
def __init__(self, max_stagnation_trials: int = 30) -> None:
if max_stagnation_trials < 0:
raise ValueError("The maximum number of stagnant trials must not be negative.")
self._max_stagnation_trials = max_stagnation_trials
def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float:
self._validate_input(trials)
is_maximize_direction = True if (study_direction == StudyDirection.MAXIMIZE) else False
trials = [t for t in trials if t.state == TrialState.COMPLETE]
current_step = len(trials) - 1
best_step = 0
for i, trial in enumerate(trials):
best_value = trials[best_step].value
current_value = trial.value
assert best_value is not None
assert current_value is not None
if is_maximize_direction and (best_value < current_value):
best_step = i
elif (not is_maximize_direction) and (best_value > current_value):
best_step = i
return self._max_stagnation_trials - (current_step - best_step)
@classmethod
def _validate_input(cls, trials: list[FrozenTrial]) -> None:
if len([t for t in trials if t.state == TrialState.COMPLETE]) == 0:
raise ValueError(
"Because no trial has been completed yet, the improvement cannot be evaluated."
)
| BestValueStagnationEvaluator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_order01.py | {
"start": 315,
"end": 2233
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_order01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
chart1 = workbook.add_chart({"type": "column"})
chart2 = workbook.add_chart({"type": "bar"})
chart3 = workbook.add_chart({"type": "line"})
chart4 = workbook.add_chart({"type": "pie"})
chart1.axis_ids = [54976896, 54978432]
chart2.axis_ids = [54310784, 54312320]
chart3.axis_ids = [69816704, 69818240]
chart4.axis_ids = [69816704, 69818240]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet1.write_column("A1", data[0])
worksheet1.write_column("B1", data[1])
worksheet1.write_column("C1", data[2])
worksheet2.write_column("A1", data[0])
worksheet2.write_column("B1", data[1])
worksheet2.write_column("C1", data[2])
worksheet3.write_column("A1", data[0])
worksheet3.write_column("B1", data[1])
worksheet3.write_column("C1", data[2])
chart1.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart2.add_series({"values": "=Sheet2!$A$1:$A$5"})
chart3.add_series({"values": "=Sheet3!$A$1:$A$5"})
chart4.add_series({"values": "=Sheet1!$B$1:$B$5"})
worksheet1.insert_chart("E9", chart1)
worksheet2.insert_chart("E9", chart2)
worksheet3.insert_chart("E9", chart3)
worksheet1.insert_chart("E24", chart4)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spack__spack | lib/spack/spack/util/environment.py | {
"start": 8326,
"end": 9258
} | class ____:
"""Base class for modifiers that modify the value of an environment variable."""
__slots__ = ("name", "value", "separator", "trace")
def __init__(
self, name: str, value: str, *, separator: str = os.pathsep, trace: Optional[Trace] = None
):
self.name = name.upper() if sys.platform == "win32" else name
self.value = value
self.separator = separator
self.trace = trace
def __eq__(self, other: object):
if not isinstance(other, NameValueModifier):
return NotImplemented
return (
self.name == other.name
and self.value == other.value
and self.separator == other.separator
)
def execute(self, env: MutableMapping[str, str]):
"""Apply the modification to the mapping passed as input"""
raise NotImplementedError("must be implemented by derived classes")
| NameValueModifier |
python | django__django | tests/check_framework/test_security.py | {
"start": 12551,
"end": 13542
} | class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False,
)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(base.check_content_type_nosniff(None), [base.W006])
@override_settings(MIDDLEWARE=[], SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(base.check_content_type_nosniff(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True,
)
def test_with_content_type_nosniff(self):
self.assertEqual(base.check_content_type_nosniff(None), [])
| CheckContentTypeNosniffTest |
python | kamyu104__LeetCode-Solutions | Python/dice-roll-simulation.py | {
"start": 58,
"end": 956
} | class ____(object):
def dieSimulator(self, n, rollMax):
"""
:type n: int
:type rollMax: List[int]
:rtype: int
"""
MOD = 10**9+7
def sum_mod(array):
return reduce(lambda x, y: (x+y)%MOD, array)
dp = [[1] + [0]*(rollMax[i]-1) for i in xrange(6)] # 0-indexed
for _ in xrange(n-1):
new_dp = [[0]*rollMax[i] for i in xrange(6)]
for i in xrange(6):
for k in xrange(rollMax[i]):
for j in xrange(6):
if i == j:
if k < rollMax[i]-1: # 0-indexed
new_dp[j][k+1] = (new_dp[j][k+1]+dp[i][k])%MOD
else:
new_dp[j][0] = (new_dp[j][0]+dp[i][k])%MOD
dp = new_dp
return sum_mod(sum_mod(row) for row in dp)
| Solution |
python | TheAlgorithms__Python | data_structures/hashing/quadratic_probing.py | {
"start": 60,
"end": 2361
} | class ____(HashTable):
"""
Basic Hash Table example with open addressing using Quadratic Probing
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _collision_resolution(self, key, data=None): # noqa: ARG002
"""
Quadratic probing is an open addressing scheme used for resolving
collisions in hash table.
It works by taking the original hash index and adding successive
values of an arbitrary quadratic polynomial until open slot is found.
Hash + 1², Hash + 2², Hash + 3² .... Hash + n²
reference:
- https://en.wikipedia.org/wiki/Quadratic_probing
e.g:
1. Create hash table with size 7
>>> qp = QuadraticProbing(7)
>>> qp.insert_data(90)
>>> qp.insert_data(340)
>>> qp.insert_data(24)
>>> qp.insert_data(45)
>>> qp.insert_data(99)
>>> qp.insert_data(73)
>>> qp.insert_data(7)
>>> qp.keys()
{11: 45, 14: 99, 7: 24, 0: 340, 5: 73, 6: 90, 8: 7}
2. Create hash table with size 8
>>> qp = QuadraticProbing(8)
>>> qp.insert_data(0)
>>> qp.insert_data(999)
>>> qp.insert_data(111)
>>> qp.keys()
{0: 0, 7: 999, 3: 111}
3. Try to add three data elements when the size is two
>>> qp = QuadraticProbing(2)
>>> qp.insert_data(0)
>>> qp.insert_data(999)
>>> qp.insert_data(111)
>>> qp.keys()
{0: 0, 4: 999, 1: 111}
4. Try to add three data elements when the size is one
>>> qp = QuadraticProbing(1)
>>> qp.insert_data(0)
>>> qp.insert_data(999)
>>> qp.insert_data(111)
>>> qp.keys()
{4: 999, 1: 111}
"""
i = 1
new_key = self.hash_function(key + i * i)
while self.values[new_key] is not None and self.values[new_key] != key:
i += 1
new_key = (
self.hash_function(key + i * i)
if not self.balanced_factor() >= self.lim_charge
else None
)
if new_key is None:
break
return new_key
if __name__ == "__main__":
import doctest
doctest.testmod()
| QuadraticProbing |
python | pytorch__pytorch | test/dynamo/test_decorators.py | {
"start": 429,
"end": 67464
} | class ____(torch._dynamo.test_case.TestCase):
def test_disallow_in_graph(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(a):
x = torch.add(a, 1)
x = torch.add(x, 1)
x = torch.sub(x, 1)
x = torch.add(x, 1)
x = torch.add(x, 1)
return x
torch._dynamo.disallow_in_graph(torch.sub)
fn(torch.randn(10))
torch._dynamo.allow_in_graph(torch.sub)
# check for graph break on sub
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 4)
def test_disable_for_custom_op(self):
import torch.library
from torch.library import Library
foo = Library("foo", "DEF") # noqa: TOR901
foo.define("custom(Tensor self) -> Tensor")
# Dynamic shape data dependent operator. For static shape compilation, Dynamo
# should graph break on it. But, the meta kernel is not implemented properly.
@torch.library.impl(foo, "custom", "CPU")
def foo_cpu(x):
return x.nonzero()
# Disallow does not work because of extra python frames with torch.library python API
torch.ops.foo.custom = torch._dynamo.disable(torch.ops.foo.custom)
def fn(x):
a = torch.nn.functional.relu(x)
b = torch.ops.foo.custom(a)
c = torch.cos(b)
return c
x = torch.randint(2, (100,))
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res = opt_fn(x)
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(ref, res)
def test_disable_ignores_outer_wraps(self):
def orig_inner():
pass
def inner():
pass
inner._torchdynamo_orig_callable = orig_inner
@functools.wraps(inner)
def wrapper():
raise AssertionError("wrapper called")
# This behavior is not ideal, but supporting it would add overhead
# to callsites of eval_frame.innermost_fn. A warning would also be very noisy.
torch._dynamo.disable(fn=wrapper, recursive=True)
def test_disable_nn_modules_forward_hook(self):
class SimpleLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(4, 4)
def forward(self, inp):
return self.layer0(torch.sigmoid(inp))
class SimpleModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = SimpleLinear()
self.layer1 = torch.nn.Linear(4, 4)
def forward(self, inp):
z = self.layer0(torch.sin(inp))
return self.layer1(z)
def hook(module, args):
inp = args[0].sigmoid()
return (inp,)
model = SimpleModel()
model.layer0.register_forward_pre_hook(hook)
# Disable my monkeypatching
model.layer0 = torch._dynamo.disable(model.layer0)
cnts = torch._dynamo.testing.CompileCounterWithBackend("eager")
opt_model = torch.compile(model, backend=cnts)
opt_model(torch.randn(4))
# check for no graph break
self.assertEqual(cnts.frame_count, 2)
gm0 = cnts.graphs[0]
# Check that the first graph has sin node, and no sigmoid
self.assertTrue(any(node.target is torch.sin for node in gm0.graph.nodes))
self.assertTrue(
all(node.target is not torch.sigmoid for node in gm0.graph.nodes)
)
gm1 = cnts.graphs[1]
# Check that the first graph does not have sigmoid. sigmoid is used in
# both hook and disabled module.
self.assertTrue(
all(node.target is not torch.sigmoid for node in gm1.graph.nodes)
)
def test_disable_nn_module_with_class_decorator(self):
cnts = torch._dynamo.testing.CompileCounterWithBackend("eager")
@torch._dynamo.disable
class SimpleLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(4, 4)
def forward(self, inp):
return self.layer0(torch.sigmoid(inp))
@torch.compile(backend=cnts)
class SimpleModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = SimpleLinear()
self.layer1 = torch.nn.Linear(4, 4)
def forward(self, inp):
z = self.layer0(torch.sin(inp))
return self.layer1(z)
def hook(module, args):
inp = args[0].sigmoid()
return (inp,)
model = SimpleModel()
model.layer0.register_forward_pre_hook(hook)
model(torch.randn(4))
# check for no graph break
self.assertEqual(cnts.frame_count, 2)
gm0 = cnts.graphs[0]
# Check that the first graph has sin node, and no sigmoid
self.assertTrue(any(node.target is torch.sin for node in gm0.graph.nodes))
self.assertTrue(
all(node.target is not torch.sigmoid for node in gm0.graph.nodes)
)
gm1 = cnts.graphs[1]
# Check that the first graph does not have sigmoid. sigmoid is used in
# both hook and disabled module.
self.assertTrue(
all(node.target is not torch.sigmoid for node in gm1.graph.nodes)
)
def test_allow_in_graph(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(a):
x = torch.add(a, 1)
x = torch.add(x, 1)
x = my_custom_function(x)
x = torch.add(x, 1)
x = torch.add(x, 1)
return x
torch._dynamo.allow_in_graph(my_custom_function)
fn(torch.randn(10))
torch._dynamo.disallow_in_graph(my_custom_function)
# check for no graph break
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 5)
def test_allow_in_graph_no_id_reuse(self):
cnts = torch._dynamo.testing.CompileCounter()
def do_allow_in_graph(x):
return x + 1
torch._dynamo.allow_in_graph(do_allow_in_graph)
del do_allow_in_graph
# `id(dont_allow_in_graph)` would likely match `id(do_allow_in_graph)`
# We want to make sure Dynamo always trace through
# `dont_allow_in_graph`, by checking for the explicit graph break.
def dont_allow_in_graph(x):
torch._dynamo.graph_break()
return x + 1
@torch.compile(backend=cnts)
def fn(a):
x = torch.add(a, 1)
x = torch.add(x, 1)
x = dont_allow_in_graph(x)
x = torch.add(x, 1)
x = torch.add(x, 1)
return x
fn(torch.randn(10))
# Check for graph break
self.assertEqual(cnts.frame_count, 3)
def test_incorrect_usage_disallow_in_graph(self):
with self.assertRaises(IncorrectUsage):
@torch._dynamo.disallow_in_graph
def fn1(x):
return x.cos()
def test_nonstrict_trace_tensor_args(self):
@torch._dynamo.nonstrict_trace
def trace_me(x, y, z):
torch._dynamo.graph_break()
return x * y + z
def fn(x, y):
t0 = x + 1
t1 = trace_me(x, y, t0)
t2 = t1 + y
return t0 * t2
x, y = torch.randn(10), torch.randn(10)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, y)
res = opt_fn(x, y)
self.assertEqual(ref, res)
def test_nonstrict_trace_pre_existing_dict(self):
@torch._dynamo.nonstrict_trace
def trace_me(x, d):
torch._dynamo.graph_break()
return x * d["a"]
def fn(x, d):
t0 = trace_me(x, d)
return t0 + 1
x = torch.randn(10)
d = {"a": 2}
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, d)
res = opt_fn(x, d)
self.assertEqual(ref, res)
def test_nonstrict_trace_newly_constructed_dict_with_side_effects(self):
@torch._dynamo.nonstrict_trace
def trace_me(x, d):
torch._dynamo.graph_break()
return x * d["a"]
def fn(x):
d = {}
d["a"] = 2
t0 = trace_me(x, d)
return t0 + 1
x = torch.randn(10)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_nonstrict_trace_pre_existing_dict_with_side_effects(self):
@torch._dynamo.nonstrict_trace
def trace_me(x, d):
torch._dynamo.graph_break()
return x * d["a"]
def fn(x, d):
d["a"] = x + 1
t0 = trace_me(x, d)
return t0 + 2
x = torch.randn(10)
d0 = {"a": 0}
d1 = dict(d0)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, d0)
res = opt_fn(x, d1)
self.assertEqual(ref, res)
self.assertEqual(d0, d1)
def test_nonstrict_trace_pre_existing_custom_class(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
torch.utils._pytree.register_pytree_node(
Point,
lambda p: ((p.x, p.y), ()),
lambda xy, _: Point(xy[0], xy[1]),
)
@torch._dynamo.nonstrict_trace
def trace_me(p):
torch._dynamo.graph_break()
return p.x * p.y
def fn(p):
res = trace_me(p)
return res, p.x, p.y
p = Point(torch.ones(10), torch.ones(1))
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(p)
res = opt_fn(p)
self.assertEqual(ref, res)
def test_nonstrict_trace_pre_existing_custom_class_with_side_effects(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
torch.utils._pytree.register_pytree_node(
Point,
lambda p: ((p.x, p.y), ()),
lambda xy, _: Point(xy[0], xy[1]),
)
@torch._dynamo.nonstrict_trace
def trace_me(p):
torch._dynamo.graph_break()
return p.x * p.y
def fn(p):
p.x = p.x + 1
p.y = p.y + 2
res = trace_me(p)
return res, p.x, p.y
p1 = Point(torch.ones(10), torch.ones(1))
p2 = Point(torch.ones(10), torch.ones(1))
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(p1)
res = opt_fn(p2)
self.assertEqual(ref, res)
self.assertEqual(p1.x, p2.x)
self.assertEqual(p1.y, p2.y)
def test_nonstrict_trace_newly_constructed_custom_class_with_side_effects(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
torch.utils._pytree.register_pytree_node(
Point,
lambda p: ((p.x, p.y), ()),
lambda xy, _: Point(xy[0], xy[1]),
)
@torch._dynamo.nonstrict_trace
def trace_me(p):
torch._dynamo.graph_break()
return p.x * p.y
def fn(x, y):
p = Point(x, y)
p.x = p.x + 1
p.y = p.y + 2
res = trace_me(p)
return res, p.x, p.y
x, y = torch.ones(10), torch.ones(1)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, y)
res = opt_fn(x, y)
self.assertEqual(ref, res)
def test_nonstrict_trace_nested_custom_class(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
class PointTensor:
p: Point
t: torch.Tensor
def __init__(self, p, t):
self.p = p
self.t = t
torch.utils._pytree.register_pytree_node(
PointTensor,
lambda pt: ((pt.p, pt.t), ()),
lambda pt, _: PointTensor(pt[0], pt[1]),
)
torch.utils._pytree.register_pytree_node(
Point,
lambda p: ((p.x, p.y), ()),
lambda xy, _: Point(xy[0], xy[1]),
)
def trace_point(p):
torch._dynamo.graph_break()
return p.x * p.y
@torch._dynamo.nonstrict_trace
def trace_point_tensor(pt):
torch._dynamo.graph_break()
return pt.t + trace_point(pt.p)
def fn(x, y):
p = Point(x, y)
t = x + y
pt = PointTensor(p, t)
res = trace_point_tensor(pt)
return res
x, y = torch.ones(10), torch.ones(1)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, y)
res = opt_fn(x, y)
self.assertEqual(ref, res)
def test_nonstrict_trace_pre_existing_register_constant_type_guard(self):
class State:
def __init__(self, n):
self.n = n
def get_num(self):
torch._dynamo.graph_break()
return self.n
def __eq__(self, other):
return isinstance(other, State) and self.n == other.n
def __hash__(self):
return hash(self.n)
# Assume `State` is implemented in C, and the author didn't bother to
# provide a pytree decomposition for it, and its instances are safe to
# treat as a constant by `torch.compile`.
torch.utils._pytree.register_constant(State)
@torch._dynamo.nonstrict_trace
def trace_me(x, s):
return x * s.get_num()
cnts = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
@torch.compile(fullgraph=True, backend=cnts)
def fn(x, s):
res = trace_me(x, s)
return res
x = torch.ones(10)
# Make sure recompilation didn't happen.
self.assertEqual(cnts.frame_count, 0)
fn(x, State(42))
self.assertEqual(cnts.frame_count, 1)
fn(x, State(42))
self.assertEqual(cnts.frame_count, 1)
# Make sure recompilation did happen.
fn(x, State(41))
self.assertEqual(cnts.frame_count, 2)
def test_nonstrict_trace_int_and_float_output(self):
@torch._dynamo.nonstrict_trace
def trace_me(x):
torch._dynamo.graph_break()
return len(x.shape), 0.42
def fn(x):
n1, n2 = trace_me(x)
return x * n1 + n2
x = torch.randn(10)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_nonstrict_trace_tuple_and_sym_int_output(self):
@torch._dynamo.nonstrict_trace
def trace_me(x):
torch._dynamo.graph_break()
return x + 1, x.size(0)
def fn(x):
t0, n = trace_me(x)
return t0 * n
x = torch.randn(10)
opt_fn = torch.compile(fn, dynamic=True, fullgraph=True, backend="aot_eager")
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_nonstrict_trace_inside_compiled_function(self):
def trace_me(x):
torch._dynamo.graph_break()
return x + 42
def fn(x):
res = torch._dynamo.nonstrict_trace(trace_me)(x)
return res + 1
x = torch.randn(10)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_nonstrict_trace_inside_compiled_function_kwarg(self):
def trace_me(x):
torch._dynamo.graph_break()
return x + 42
def fn(x):
res = torch._dynamo.nonstrict_trace(traceable_fn=trace_me)(x)
return res + 1
x = torch.randn(10)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_nonstrict_trace_on_method(self):
class Num:
def __init__(self, n):
self.n = n
@torch._dynamo.nonstrict_trace
def trace_me(self, t):
torch._dynamo.graph_break()
return t + self.n
torch.utils._pytree.register_pytree_node(
Num,
lambda num: ((num.n,), ()),
lambda n, _: Num(n[0]),
)
def fn(x, n):
num = Num(n)
return num.trace_me(x)
x, n = torch.randn(10), 42
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, n)
res = opt_fn(x, n)
self.assertEqual(ref, res)
def test_nonstrict_trace_captured_external_tensor(self):
cst = torch.ones(1)
@torch._dynamo.nonstrict_trace
def trace_me(x, y):
torch._dynamo.graph_break()
return x * y + cst
def fn(x, y):
return trace_me(x, y)
x, y = torch.randn(10), torch.randn(10)
opt_fn = torch.compile(fn, fullgraph=True, backend="aot_eager")
ref = fn(x, y)
res = opt_fn(x, y)
self.assertEqual(ref, res)
def test_nonstrict_trace_no_action_at_a_distance(self):
def trace_me(x):
torch._dynamo.graph_break()
return x + 42
# No effect on traceability of `trace_me`
torch._dynamo.nonstrict_trace(trace_me)
def fn(x):
res = trace_me(x)
return res + 1
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
opt_fn = torch.compile(fn, backend=cnts)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
# There should be 1 graph break
self.assertEqual(cnts.frame_count, 2)
def test_nonstrict_trace_inside_compiled_function_error(self):
@torch.compile(fullgraph=True, backend="aot_eager")
def fn(x, y):
def trace_me(x, y):
torch._dynamo.graph_break()
return x * y
res = torch._dynamo.nonstrict_trace(trace_me)(x, y)
return res + 1
try:
fn(torch.ones(10), torch.ones(1))
self.assertFalse(True) # must raise error before this
except torch._dynamo.exc.Unsupported as e:
msg = "Applying `nonstrict_trace` to function <trace_me>; however, `nonstrict_trace` currently requires the function to be defined outside `torch.compile` region." # NOQA: B950
self.assertIn(msg, str(e))
def test_nonstrict_trace_custom_class_error(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
@torch._dynamo.nonstrict_trace
def trace_me(p):
torch._dynamo.graph_break()
return p.x * p.y
@torch.compile(fullgraph=True, backend="aot_eager")
def fn(p):
res = trace_me(p)
return res + 1
try:
p = Point(torch.ones(10), torch.ones(1))
fn(p)
self.assertFalse(True) # must raise error before this
except torch._dynamo.exc.Unsupported as e:
self.assertIn("Invalid input type for nonstrict_trace-ed function", str(e))
def test_nonstrict_trace_nested_custom_class_error(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
class PointTensor:
p: Point
t: torch.Tensor
def __init__(self, p, t):
self.p = p
self.t = t
torch.utils._pytree.register_pytree_node(
PointTensor,
lambda pt: ((pt.p, pt.t), ()),
lambda pt, _: PointTensor(pt[0], pt[1]),
)
def trace_point(p):
torch._dynamo.graph_break()
return p.x * p.y
@torch._dynamo.nonstrict_trace
def trace_point_tensor(pt):
torch._dynamo.graph_break()
return pt.t + trace_point(pt.p)
@torch.compile(fullgraph=True, backend="aot_eager")
def fn(x, y):
p = Point(x, y)
t = x + y
pt = PointTensor(p, t)
res = trace_point_tensor(pt)
return res
try:
fn(torch.ones(10), torch.ones(1))
self.assertFalse(True) # must raise error before this
except torch._dynamo.exc.Unsupported as e:
self.assertIn("Invalid input type for nonstrict_trace-ed function", str(e))
def test_nonstrict_trace_custom_class_output_error(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
@torch._dynamo.nonstrict_trace
def trace_me(x):
torch._dynamo.graph_break()
return Point(x, x + 1)
@torch.compile(fullgraph=True, backend="aot_eager")
def fn(x):
p = trace_me(x)
return p.x * p.y
try:
x = torch.ones(10)
fn(x)
self.assertFalse(True) # must raise error before this
except torch._dynamo.exc.Unsupported as e:
self.assertIn(
"Unsupported output type for nonstrict_trace-ed function", str(e)
)
def test_nonstrict_newly_constructed_trace_register_constant_type_error(self):
class State:
def __init__(self, n):
self.n = n
def get_num(self):
torch._dynamo.graph_break()
return self.n
def __eq__(self, other):
return isinstance(other, State) and self.n == other.n
def __hash__(self):
return hash(self.n)
# Assume `State` is implemented in C, and the author didn't bother to
# provide a pytree decomposition for it, and its instances are safe to
# treat as a constant by `torch.compile`.
torch.utils._pytree.register_constant(State)
@torch._dynamo.nonstrict_trace
def trace_me(x, s):
return x * s.get_num()
@torch.compile(fullgraph=True, backend="aot_eager")
def fn(x):
s = State(10)
res = trace_me(x, s)
return res
try:
x = torch.ones(10)
fn(x)
self.assertFalse(True) # must raise error before this
except torch._dynamo.exc.Unsupported as e:
self.assertIn(
"Input marked with `pytree.register_constant` constructed in the `torch.compile` region",
str(e),
)
def test_nonstrict_trace_object_in_context_error(self):
class Point:
x: torch.Tensor
y: torch.Tensor
def __init__(self, x, y):
self.x = x
self.y = y
class PointTensor:
p: Point
t: torch.Tensor
def __init__(self, p, t):
self.p = p
self.t = t
torch.utils._pytree.register_pytree_node(
PointTensor,
lambda pt: ((pt.t,), pt.p),
lambda ts, p: PointTensor(p, ts[0]),
)
@torch._dynamo.nonstrict_trace
def trace_me(pt):
torch._dynamo.graph_break()
return pt.t + pt.p.x * pt.p.y
@torch.compile(fullgraph=True, backend="aot_eager")
def fn(x, y):
p = Point(x, y)
t = x + y
pt = PointTensor(p, t)
res = trace_me(pt)
return res
try:
x, y = torch.ones(10), torch.ones(1)
fn(x, y)
self.assertFalse(True) # must raise error before this
except torch._dynamo.exc.Unsupported as e:
self.assertIn(
"Invalid use of pytree_flatten with nonstrict_trace-ed function", str(e)
)
def test_graph_break(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x):
x = torch.cos(x)
x = torch.cos(x)
torch._dynamo.graph_break()
x = torch.cos(x)
x = torch.cos(x)
torch._dynamo.graph_break()
x = torch.cos(x)
x = torch.cos(x)
return x
fn(torch.randn(4, 5))
self.assertEqual(cnts.frame_count, 3)
self.assertEqual(cnts.op_count, 6)
def test_skip_frame(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x):
x = x + 1
torch._dynamo.skip_frame()
return x + 1
inp = torch.ones(3, 3)
self.assertEqual(fn(inp), inp + 2)
self.assertEqual(cnts.frame_count, 0)
@torch.compile(backend=cnts)
def gn(x):
x = x + 1
torch._dynamo.graph_break()
x = x + 1
torch._dynamo.skip_frame()
return x + 1
self.assertEqual(gn(inp), inp + 3)
self.assertEqual(cnts.frame_count, 1)
def test_step_unsupported(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x):
x = x + 1 + 2
torch._dynamo.step_unsupported()
return x + 4
inp = torch.ones(3)
self.assertEqual(fn(inp), inp + 7)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_step_unsupported_empty_checkpoint(self):
@torch.compile(backend="eager")
def fn(x):
torch._dynamo.step_unsupported()
return x + 1
inp = torch.ones(3)
self.assertEqual(fn(inp), inp + 1)
@skipIfWindows(
msg="TODO: (xuhancn), confirm if torch.compiler.disable work on Windows."
)
def test_disable_recursive_false(self):
def fn2(x):
return x + 1
@torch._dynamo.disable(recursive=False)
def fn1(x):
if torch.compiler.is_compiling():
raise RuntimeError("bad")
x = x.sigmoid()
return fn2(x.cos())
def fn(x):
return fn1(x.tan())
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
opt_fn(torch.randn(4))
self.assertEqual(cnts.frame_count, 2)
# test that applying disable nonrecursive doesn't modify the original function
def fn3(x):
if torch.compiler.is_compiling():
return x - 1
return fn2(x) + 2
@torch.compile(backend=cnts)
def outer(f, x):
return f(x)
inp = torch.ones(3)
fn3_disabled = torch._dynamo.disable(fn3, recursive=False)
torch._dynamo.reset()
cnts.clear()
res = outer(fn3, inp)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(res, inp - 1)
cnts.clear()
res = outer(fn3_disabled, inp)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(res, inp + 3)
torch._dynamo.reset()
cnts.clear()
res = outer(fn3_disabled, inp)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(res, inp + 3)
cnts.clear()
res = outer(fn3, inp)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(res, inp - 1)
# directly compiling a disabled function should result in a compile
torch._dynamo.reset()
cnts.clear()
res = torch.compile(fn3_disabled, backend=cnts)(inp)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(res, inp - 1)
def test_disable_recursive_false_weird(self):
from torch._dynamo.types import FrameAction, FrameExecStrategy
# test the case where the next invocation of the function is
# manually skipped
def fn(x):
if torch.compiler.is_compiling():
return x - 1
return x + 1
fn_disabled = torch._dynamo.disable(fn, recursive=False)
torch._dynamo.eval_frame.set_code_exec_strategy(
fn.__code__, FrameExecStrategy(FrameAction.SKIP, FrameAction.DEFAULT)
)
@torch.compile(backend="eager")
def outer(fn, x):
return fn(x)
inp = torch.ones(3)
self.assertEqual(outer(fn_disabled, inp), inp + 1)
torch._dynamo.eval_frame.set_code_exec_strategy(
fn.__code__, FrameExecStrategy(FrameAction.DEFAULT, FrameAction.DEFAULT)
)
self.assertEqual(torch.compile(fn, backend="eager")(inp), inp - 1)
def test_substitute_in_graph(self):
counters.clear()
# NB: Choose another C function for test when we support operator.indexOf
# out of the box
cnts = torch._dynamo.testing.CompileCounter()
fn = operator.indexOf
opt_fn = torch.compile(fn, backend=cnts)
out = fn([1, 2, 3, 4, 5], 3)
opt_out = opt_fn([1, 2, 3, 4, 5], 3)
self.assertEqual(out, opt_out)
self.assertEqual(cnts.frame_count, 0)
self.assertEqual(len(counters["graph_break"]), 1)
torch._dynamo.reset()
counters.clear()
with self.assertRaisesRegex(TypeError, "Signature mismatch"):
@torch._dynamo.substitute_in_graph(operator.indexOf)
def _(sequence, x):
for i, item in enumerate(sequence):
if item is x or item == x:
return i
raise ValueError("sequence.index(x): x not in sequence")
@torch._dynamo.substitute_in_graph(operator.indexOf)
def polyfill(a, b):
for i, item in enumerate(a):
if item is b or item == b:
return i
raise ValueError("sequence.index(x): x not in sequence")
cnts = torch._dynamo.testing.CompileCounter()
fn = operator.indexOf
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
out = fn([1, 2, 3, 4, 5], 3)
opt_out = opt_fn([1, 2, 3, 4, 5], 3)
self.assertEqual(out, opt_out)
self.assertEqual(cnts.frame_count, 0)
self.assertEqual(len(counters["graph_break"]), 0)
torch._dynamo.reset()
counters.clear()
cnts = torch._dynamo.testing.CompileCounter()
fn = polyfill
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
out = fn([1, 2, 3, 4, 5], 3)
opt_out = opt_fn([1, 2, 3, 4, 5], 3)
self.assertEqual(out, opt_out)
self.assertEqual(cnts.frame_count, 0)
self.assertEqual(len(counters["graph_break"]), 0)
@patch.object(torch._dynamo.config, "suppress_errors", True)
def test_nested_disable_decorator(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch._dynamo.disable()
def fn1(x):
return torch.sin(x) * 10
@torch.compile(backend=cnts)
def fn2(x):
x = x + 1
x = x + 1
x = fn1(x) # graph break
x = x + 1
x = x + 1
return x
@torch.compile(backend=cnts, fullgraph=True)
def fn3(x):
return fn2(x)
fn2(torch.randn(4, 5))
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 4)
with self.assertRaisesRegex(
Unsupported, r"Skip calling `torch.compiler.disable\(\)`d function"
):
fn3(torch.randn(4, 5))
def test_disable_optimize(self):
cnt = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnt, disable=True)
def f1(x):
return x + 1
f1(torch.ones(6))
self.assertEqual(cnt.frame_count, 0)
@torch.compile(backend=cnt, disable=True)
def f2(x):
return x + 1
f2(torch.ones(6))
self.assertEqual(cnt.frame_count, 0)
with patch.dict(os.environ, {"TORCHDYNAMO_DISABLE": "1"}):
@torch.compile(backend=cnt)
def f3(x):
return x + 1
f3(torch.ones(6))
self.assertEqual(cnt.frame_count, 0)
def test_torch_guards_stack_frame_register_inlining_disable(self):
x = torch.tensor([0.5, 0.5])
class encoder(torch.nn.Module):
def __init__(self, y):
super().__init__()
self.a = y
@torch._dynamo.disable
def helper(self, x, y):
return x * y
def forward(self, a, *args):
x = a + a
return self.helper(x, self.a)
e = encoder(2.0)
seen_frames = []
import contextlib
@contextlib.contextmanager
def global_context_capture_fn(frame_summary):
if frame_summary is not None:
seen_frames.append(frame_summary)
yield
with mock.patch(
"torch._guards.TracingContext.current_frame",
side_effect=global_context_capture_fn,
):
torch.compile(e, backend="eager")(x)
self.assertEqual(len(seen_frames), 0)
def test_torch_guards_stack_frame_register_inlining_partially_disable(self):
y = torch.nn.Parameter(torch.tensor([0.25, 0.25]))
x = torch.tensor([0.5, 0.5])
class encoder(torch.nn.Module):
def __init__(self, y):
super().__init__()
self.register_parameter("param", y)
@torch._dynamo.disable
def helper_disabled(self, x, y):
return x.sin() * y.cos()
def helper(self, x, y):
return x * y
def forward(self, a, *args):
x = a + a
return self.helper(x, self.param) + self.helper_disabled(x, self.param)
e = encoder(y)
cnt = torch._dynamo.testing.CompileCounter()
torch.compile(e, backend=cnt)(x)
# first frame is before disable, second frame is after disable
self.assertEqual(cnt.frame_count, 2)
self.assertEqual(cnt.op_count, 3)
def _test_mark_static_address(self, guarded):
# This test verifies that dynamo properly marks inputs as static
# when using the mark_static_address API.
# For both inline_inbuilt_nn_modules True and False, we expect the
# tensor to be present in the buffers attribute of the graph.
compiles_with_buffers = 0
compiles = 0
def debug_compiler(gm, _):
nonlocal compiles_with_buffers
nonlocal compiles
compiles_with_buffers += len(gm._buffers) > 0
compiles += 1
return gm
@torch.compile(backend=debug_compiler)
def fn(x):
return x + 1
inp = torch.ones(2)
torch._dynamo.mark_static_address(inp, guard=guarded)
fn(inp)
if guarded:
self.assertEqual(compiles_with_buffers, 1)
inp2 = torch.ones(2)
# if guarded, should trigger another recompile
# since it was not marked static, compiles with buffers
# should not be incremented
fn(inp2)
if guarded:
self.assertEqual(compiles_with_buffers, 1)
self.assertEqual(compiles, 2 if guarded else 1)
def test_mark_static_address_guarded(self):
with torch._dynamo.config.patch("inline_inbuilt_nn_modules", True):
self._test_mark_static_address(guarded=True)
self._test_mark_static_address(guarded=True)
def test_mark_static_address_unguarded(self):
with torch._dynamo.config.patch("inline_inbuilt_nn_modules", True):
self._test_mark_static_address(guarded=False)
self._test_mark_static_address(guarded=False)
def test_class_methods(self):
class A:
@classmethod
def my_class_method(cls, arg1):
return cls, arg1
@staticmethod
def my_static_method(arg1):
return None, arg1
def my_regular_method(self, arg1):
return self, arg1
class B(A):
def my_class_method(self, arg1):
return super().my_class_method(arg1)
def my_static_method(self, arg1):
return super().my_static_method(arg1)
class C(A):
@classmethod
def my_class_method(cls, arg1):
return super().my_class_method(arg1)
cnt = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnt)
def fn(a, b, c):
# We want a function that does not graph break but
# does generate custom bytecode
v1 = a.my_class_method(1)
v2 = A.my_class_method(2)
v3 = a.my_static_method(3)
v4 = A.my_static_method(4)
v5 = a.my_regular_method(5)
v6 = b.my_class_method(6)
v7 = b.my_static_method(7)
v8 = c.my_class_method(8)
v9 = C.my_class_method(9)
torch.rand(2)
return v1, v2, v3, v4, v5, v6, v7, v8, v9
a, b, c = A(), B(), C()
v1, v2, v3, v4, v5, _, v7, v8, v9 = fn(a, b, c)
self.assertEqual(v1, (A, 1))
self.assertEqual(v2, (A, 2))
self.assertEqual(v3, (None, 3))
self.assertEqual(v4, (None, 4))
self.assertEqual(v5, (a, 5))
# TODO fix me: we do not resolve classmethods properly
# from a regular method
# self.assertEqual(v6, (B, 6))
self.assertEqual(v7, (None, 7))
self.assertEqual(v8, (C, 8))
self.assertEqual(v9, (C, 9))
self.assertEqual(cnt.frame_count, 1)
def test_assume_constant_result_on_user_defined_fn(self):
@torch._dynamo.assume_constant_result
def const_fn(n, s):
return torch.full([n], s)
def fn(B):
B = const_fn(B.size(0), 13)
X = B * 2
return X.tolist()
B_list = [8] * 32
B = torch.tensor(B_list, dtype=torch.int32)
torch._dynamo.decorators.mark_static(B, 0)
with torch._dynamo.config.patch(
capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
):
self.assertEqual(
fn(B),
torch.compile(fn, backend="eager", fullgraph=True, dynamic=True)(B),
)
def test_assume_constant_result_on_computation_with_graph_input(self):
@torch._dynamo.assume_constant_result
def check(y):
return y[0].item() == 1
def fn(x, y):
if check(y):
return x + 2
else:
return x + 1
y = torch.tensor([1])
x = torch.tensor(1)
self.assertEqual(fn(x, y), torch.compile(fn)(x, y))
def test_set_stance_aot_eager_then_compile(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x, y, z):
return x * y * z[0]
with torch.compiler.set_stance("aot_eager_then_compile"):
fn(2, torch.randn(2), {0: torch.randn(2)})
fn(3, torch.randn(3), {0: torch.randn(3)})
fn(4, torch.randn(4), {0: torch.randn(4)})
# Would have been 4 without stance
self.assertEqual(cnts.op_count, 2)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_mark_static_nn_module(self):
@torch._dynamo.mark_static
class Mock(torch.nn.Module):
def __init__(self, c):
super().__init__()
self.c = c
def forward(self, x):
return x * self.c
cnts = torch._dynamo.testing.CompileCounter()
mod1 = Mock(10)
mod2 = Mock(20)
mod3 = Mock(30)
opt_mod1 = torch.compile(mod1, backend=cnts, fullgraph=True)
opt_mod2 = torch.compile(mod2, backend=cnts, fullgraph=True)
opt_mod3 = torch.compile(mod3, backend=cnts, fullgraph=True)
x = torch.randn(4, 4)
opt_mod1(x)
opt_mod2(x)
opt_mod3(x)
# Must be 3 compilations. If not marked static there would be 2, because self.c would be converted to symints.
self.assertEqual(cnts.frame_count, 3)
def test_set_stance_eager_then_compile(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x, y, z):
return x * y * z[0]
with torch.compiler.set_stance("eager_then_compile"):
fn(1, torch.randn(1), {0: torch.randn(1)})
fn(2, torch.randn(2), {0: torch.randn(2)})
fn(3, torch.randn(3), {0: torch.randn(3)})
self.assertEqual(cnts.frame_count, 1)
def test_set_stance_eager_then_compile_with_graph_break(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x, y, z):
y = torch.sin(y)
torch._dynamo.graph_break()
y = torch.cos(y)
return x * y * z[0]
with torch.compiler.set_stance("eager_then_compile"):
fn(1, torch.randn(1), {0: torch.randn(1)})
fn(2, torch.randn(2), {0: torch.randn(2)})
fn(3, torch.randn(3), {0: torch.randn(3)})
# frame count 2 since we added a graph break
self.assertEqual(cnts.frame_count, 2)
def test_set_stance_force_eager(self):
@torch.compile(backend="eager")
def a(x):
if torch._dynamo.is_compiling():
return x + 1
return x + 2
@torch.compiler.set_stance("force_eager")
def b(x):
return a(x)
def c(x):
out0 = a(x)
with torch.compiler.set_stance("force_eager"):
out1 = a(x)
return out0, out1, a(x)
inp = torch.ones(3)
# test that decorating b has no overall side effect
self.assertEqual(a(inp), inp + 1)
self.assertEqual(b(inp), inp + 2)
self.assertEqual(c(inp), (inp + 1, inp + 2, inp + 1))
torch.compiler.set_stance("force_eager")
self.assertEqual(a(inp), inp + 2)
torch.compiler.set_stance("default")
self.assertEqual(a(inp), inp + 1)
def test_set_stance_eager_on_recompile(self):
@torch.compile(backend="eager", dynamic=False)
def a(x, n):
if torch._dynamo.is_compiling():
return x + n + 1
return x + n + 2
inp = torch.ones(3)
out1 = a(inp, 1)
with torch.compiler.set_stance("eager_on_recompile"):
out2 = a(inp, 1)
out3 = a(inp, 2)
self.assertEqual(out1, inp + 2)
self.assertEqual(out2, inp + 2)
self.assertEqual(out3, inp + 4)
def test_set_stance_fail_on_recompile(self):
@torch.compile(backend="eager", dynamic=False)
def a(x, n):
if torch._dynamo.is_compiling():
return x + n + 1
return x + n + 2
inp = torch.ones(3)
out1 = a(inp, 1)
with torch.compiler.set_stance("fail_on_recompile"):
out2 = a(inp, 1)
with self.assertRaisesRegex(RuntimeError, "fail_on_recompile"):
a(inp, 2)
self.assertEqual(out1, inp + 2)
self.assertEqual(out2, inp + 2)
def test_fail_on_recompile_shows_guard_details(self):
@torch.compile(backend="eager", dynamic=False)
def f(x):
return x + 1
f(torch.ones(4))
f(torch.ones(5))
def post_munge(s):
return re.sub(r"line number: \d+", "line number: N", s)
with torch.compiler.set_stance("fail_on_recompile"):
f(torch.ones(4))
self.assertExpectedInlineMunged(
RuntimeError,
lambda: f(torch.ones(7)),
"""\
Detected recompile when torch.compile stance is 'fail_on_recompile'. filename: 'test_decorators.py', function name: 'f', line number: N
triggered by the following guard failure(s):
- 0/0: tensor 'x' size mismatch at index 0. expected 4, actual 7
- 0/1: tensor 'x' size mismatch at index 0. expected 5, actual 7""", # noqa: B950
post_munge=post_munge,
)
def test_set_stance_fail_on_recompile_with_disable(self):
@torch.compiler.disable
def inner(x):
return x
@torch.compile(backend="eager")
def f(x):
return inner(x)
f(torch.randn(3, 3))
# should not raise error
with torch.compiler.set_stance("fail_on_recompile"):
f(torch.randn(3, 3))
def test_set_stance_forbid_in_graph(self):
@torch.compiler.set_stance("force_eager")
def a(x):
return x + 1
@torch.compile(backend="eager")
def b(x):
return a(x)
with self.assertRaisesRegex(
AssertionError, "Attempt to trace forbidden callable"
):
b(torch.ones(3))
@torch.compile(backend="eager")
def c(x):
with torch.compiler.set_stance("force_eager"):
return x + 1
with self.assertRaisesRegex(
AssertionError, "Attempt to trace forbidden callable"
):
c(torch.ones(3))
@torch.compile(backend="eager")
@torch.compiler.set_stance("force_eager")
def d(x):
return x + 1
with self.assertRaisesRegex(
AssertionError, "Attempt to trace forbidden callable"
):
d(torch.ones(3))
@torch.compile(backend="eager")
def e(x):
with torch._dynamo.set_stance("force_eager"):
return x + 1
with self.assertRaisesRegex(
AssertionError, "Attempt to trace forbidden callable"
):
e(torch.ones(3))
@torch.compile(backend="eager")
def f(x):
torch._dynamo.eval_frame._set_stance("force_eager")
return x + 1
with self.assertRaisesRegex(
AssertionError, "Attempt to trace forbidden callable"
):
f(torch.ones(3))
@torch.compile(backend="eager")
def g(x):
torch._dynamo.skip_frame()
# NOTE: torch._dynamo.is_compiling() will get traced
# and return true. torch.compiler.is_compiling() is skipped
# and will return false.
if torch.compiler.is_compiling():
raise RuntimeError("Expect this frame to be skipped")
# should not be traced, but eval frame callback is still set
with torch.compiler.set_stance("force_eager"):
return x + 1
with self.assertRaisesRegex(RuntimeError, "set_stance in a torch.compile"):
g(torch.ones(3))
def test_set_stance_force_backend(self):
@torch.compile
def a(x):
return x + 1
cnts = torch._dynamo.testing.CompileCounter()
@torch.compiler.set_stance("default", force_backend=cnts)
def b(x):
return a(x)
b(torch.ones(3))
self.assertEqual(cnts.frame_count, 1)
@torch.compiler.set_stance("default", force_backend="eager")
def c(x):
return a(x)
# just make sure this doesn't crash
c(torch.ones(3))
with self.assertRaisesRegex(RuntimeError, "force_backend"):
@torch.compiler.set_stance("force_eager", force_backend="eager")
def d(x):
pass
def test_set_stance_force_backend_with_disable(self):
@torch.compiler.disable
def inner(x):
return x
@torch.compile(backend="eager")
def f(x):
return inner(x)
f(torch.randn(3, 3))
def fail_backend(gm, ex):
raise RuntimeError("fail!")
# should not raise error
with torch.compiler.set_stance("default", force_backend=fail_backend):
f(torch.randn(3, 3))
# also tests a lot of torch._dynamo.patch_dynamo_config functionality
def test_dont_skip_tracing(self):
from torch._dynamo.test_dont_skip_tracing_functions import f1, f3, f4, f5, f6
cnts = torch._dynamo.testing.CompileCounter()
# make sure test_dont_skip_tracing_functions is actually skipped by trace rules
torch.compile(f1, backend=cnts)(torch.randn(3))
self.assertEqual(cnts.frame_count, 0)
f1_unskip = torch._dynamo.dont_skip_tracing(f1)
# basic test
def g1(x):
return f1_unskip(x)
cnts.clear()
torch.compile(g1, backend=cnts, fullgraph=True)(torch.randn(3))
self.assertEqual(cnts.frame_count, 1)
# test that dont_skip_tracing is traceable
def g2(x):
return torch._dynamo.dont_skip_tracing(f1)(x)
cnts.clear()
torch.compile(g2, backend=cnts, fullgraph=True)(torch.randn(3))
self.assertEqual(cnts.frame_count, 1)
# test that dont_skip_tracing is recursive, applied to non-skipped function
@torch._dynamo.dont_skip_tracing
def g3(x):
return f1(x)
cnts.clear()
torch.compile(g3, backend=cnts, fullgraph=True)(torch.randn(3))
self.assertEqual(cnts.frame_count, 1)
# test that dont_skip_tracing is recursive, applied to skipped function
f3_unskip = torch._dynamo.dont_skip_tracing(f3)
cnts.clear()
torch.compile(f3_unskip, backend=cnts, fullgraph=True)(torch.randn(3))
self.assertEqual(cnts.frame_count, 1)
# test dont_skip_tracing with graph breaks
inp = torch.ones(3)
res = torch.compile(f4, backend=cnts)(inp)
self.assertEqual(res, inp + 6)
@torch.compile(backend=cnts)
def g4(x):
x = f5(x, 1)
x = torch._dynamo.dont_skip_tracing(f6)(x)
x = f5(x, 8)
return x
res = g4(inp)
self.assertEqual(res, inp + 6)
# test nested dont_skip_tracing
# this also happens to test if a previously skipped frame (f4)
# can actually be compiled if called as a top-level function (in the case of a graph break)
# TODO the reset is necessary for now since attempting to trace f4 previously
# resulted in an unconditional skip
torch._dynamo.reset()
f4_unskip = torch._dynamo.dont_skip_tracing(f4)
res = torch.compile(f4_unskip, backend=cnts)(inp)
self.assertEqual(res, inp + 15)
# test dont_skip_tracing that is activated outside torch.compile
f4_unskip2 = torch._dynamo.dont_skip_tracing(torch.compile(f4, backend=cnts))
res = f4_unskip2(inp)
self.assertEqual(res, inp + 15)
# test context manager from inside
@torch.compile(backend=cnts)
def g5(x):
x = f5(x, 1)
with torch._dynamo.dont_skip_tracing():
x = f5(x, 2)
torch._dynamo.graph_break()
x = f5(x, 4)
x = f5(x, 8)
return x
res = g5(inp)
self.assertEqual(res, inp + 6)
# test context manager from outside
with torch._dynamo.dont_skip_tracing():
res = torch.compile(f4, backend=cnts)(inp)
self.assertEqual(res, inp + 15)
# test skipped function from different dont_skip_tracing regions
@torch.compile(backend=cnts)
def g6(x):
fn1 = f5
with torch._dynamo.dont_skip_tracing():
fn2 = f5
x = fn1(x, 1)
x = fn2(x, 2)
return x
res = g6(inp)
self.assertEqual(res, inp + 1)
def test_patch_dynamo_config_errors(self):
@torch.compile(backend="eager")
def f1(x):
with torch._dynamo.patch_dynamo_config(nonexistent=False):
return x + 1
with self.assertRaisesRegex(Exception, "patch_dynamo_config does not support"):
f1(torch.randn(3))
@torch.compile(backend="eager")
def f2(x):
with torch._dynamo.patch_dynamo_config("verbose", {"a": 1}):
return x + 1
with self.assertRaisesRegex(
Exception, "patch_dynamo_config does not support .* with non-safe-constant"
):
f2(torch.randn(3))
@torch.compile(backend="eager")
def f3(x):
with torch._dynamo.patch_dynamo_config({"recompile_limit": 1}):
return x + 1
with self.assertRaisesRegex(Exception, "patch_dynamo_config does not support"):
f3(torch.randn(3))
@torch.compile(backend="eager")
def f4(x):
with torch._dynamo.patch_dynamo_config(verbose=object()):
return x + 1
with self.assertRaisesRegex(
Exception, "Cannot convert patch_dynamo_config args/kwargs to constants."
):
f4(torch.randn(3))
def test_error_on_graph_break(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f1(x):
x = x + 1
with torch._dynamo.error_on_graph_break(False):
torch._dynamo.graph_break()
return x + 2
inp = torch.ones(3)
self.assertEqual(f1(inp), inp + 3)
self.assertEqual(cnts.frame_count, 2)
@torch.compile(backend=cnts)
def f2(x):
x = x + 1
with torch._dynamo.error_on_graph_break(True):
torch._dynamo.graph_break()
return x + 2
with self.assertRaises(Unsupported):
f2(inp)
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f3(x):
x = x + 1
with torch._dynamo.error_on_graph_break(False):
torch._dynamo.graph_break()
x = x + 2
torch._dynamo.graph_break()
return x + 4
cnts.clear()
self.assertEqual(f3(inp), inp + 7)
self.assertEqual(cnts.frame_count, 3)
def inner_f4(x):
x = x + 2
torch._dynamo.graph_break()
return x + 4
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f4(x):
x = x + 1
with torch._dynamo.error_on_graph_break(False):
torch._dynamo.skip_frame()
return inner_f4(x)
cnts.clear()
self.assertEqual(f4(inp), inp + 7)
self.assertEqual(cnts.frame_count, 2)
def test_error_on_graph_break_nested(self):
# error_on_graph_break in a nested frame
cnts = torch._dynamo.testing.CompileCounter()
@torch._dynamo.error_on_graph_break(False)
def inner_f5(x):
x = x + 2
torch._dynamo.graph_break()
return x + 4
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f5(x):
x = x + 1
return inner_f5(x)
inp = torch.ones(3)
self.assertEqual(f5(inp), inp + 7)
self.assertEqual(cnts.frame_count, 4)
def inner_f6(x):
x = x + 2
with torch._dynamo.error_on_graph_break(False):
torch._dynamo.graph_break()
return x + 4
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f6(x):
x = x + 1
return inner_f6(x)
cnts.clear()
self.assertEqual(f6(inp), inp + 7)
self.assertEqual(cnts.frame_count, 3)
def inner_f7(x):
x = x + 2
with torch._dynamo.error_on_graph_break(True):
torch._dynamo.graph_break()
return x + 4
@torch._dynamo.error_on_graph_break(False)
@torch.compile(backend=cnts)
def f7(x):
x = x + 1
return inner_f7(x)
with self.assertRaises(Unsupported):
f7(inp)
def test_error_on_graph_break_nested_with_skip(self):
# error_on_graph_break in a nested frame with a skipped frame in between
cnts = torch._dynamo.testing.CompileCounter()
@torch._dynamo.error_on_graph_break(False)
def inner2_f8(x):
x = x + 2
torch._dynamo.graph_break()
return x + 4
def inner1_f8(x):
with torch._dynamo.error_on_graph_break(False):
torch._dynamo.skip_frame()
return inner2_f8(x)
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f8(x):
x = x + 1
return inner1_f8(x)
inp = torch.ones(3)
self.assertEqual(f8(inp), inp + 7)
self.assertEqual(cnts.frame_count, 4)
def inner2_f9(x):
x = x + 2
with torch._dynamo.error_on_graph_break(True):
torch._dynamo.graph_break()
return x + 4
@torch._dynamo.disable(recursive=False)
def inner1_f9(x):
return inner2_f9(x)
@torch._dynamo.error_on_graph_break(False)
@torch.compile(backend=cnts)
def f9(x):
x = x + 1
return inner1_f9(x)
with self.assertRaises(Unsupported):
f9(inp)
# test export with error_on_graph_break(False) still errors
def test_error_on_graph_break_export(self):
@torch._dynamo.error_on_graph_break(False)
def inner(x):
x = x + 2
torch._dynamo.graph_break()
return x + 4
def f(x):
x = x + 1
return inner(x)
with self.assertRaises(Unsupported):
torch._dynamo.export(f)(torch.ones(3))
def test_error_on_graph_break_nested_deep(self):
cnts = torch._dynamo.testing.CompileCounter()
def inner1_f1(x):
x = x + 1
torch._dynamo.graph_break()
return x + 2
def inner2_f1(x):
return inner1_f1(x)
def inner3_f1(x):
with torch._dynamo.error_on_graph_break(False):
return inner2_f1(x)
def inner4_f1(x):
return inner3_f1(x)
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend=cnts)
def f1(x):
x = x + 4
return inner4_f1(x)
inp = torch.ones(3)
self.assertEqual(f1(inp), inp + 7)
self.assertEqual(cnts.frame_count, 4)
def inner1_f2(x):
x = x + 1
torch._dynamo.graph_break()
return x + 2
def inner2_f2(x):
return inner1_f2(x)
def inner3_f2(x):
with torch._dynamo.error_on_graph_break(True):
return inner2_f2(x)
def inner4_f2(x):
return inner3_f2(x)
@torch._dynamo.error_on_graph_break(False)
@torch.compile(backend=cnts)
def f2(x):
x = x + 4
return inner4_f2(x)
with self.assertRaises(Unsupported):
f2(inp)
def test_error_on_graph_break_error(self):
@torch.compile(backend="eager")
def f1():
with torch._dynamo.error_on_graph_break(foo="bar"):
pass
@torch.compile(backend="eager")
def f2():
with torch._dynamo.error_on_graph_break():
pass
@torch.compile(backend="eager")
def f3():
with torch._dynamo.error_on_graph_break("foo"):
pass
with self.assertRaises(Exception):
f1()
with self.assertRaises(Exception):
f2()
with self.assertRaises(Exception):
f3()
def test_nested_compile_error_on_graph_break(self):
inp = torch.ones(3)
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend="eager")
def inner_f1(x):
x = x + 1
torch._dynamo.graph_break()
return x + 2
@torch._dynamo.error_on_graph_break(False)
@torch.compile(backend="eager")
def f1(x):
return inner_f1(x)
with self.assertRaises(Unsupported):
f1(inp)
@torch._dynamo.error_on_graph_break(False)
@torch.compile(backend="eager")
def inner_f2(x):
x = x + 1
torch._dynamo.graph_break()
return x + 2
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend="eager")
def f2(x):
return inner_f2(x)
self.assertEqual(f2(inp), inp + 3)
def test_error_on_graph_break_fullgraph(self):
# Test that error_on_graph_break=False cannot override fullgraph=True
inp = torch.ones(3)
@torch.compile(backend="eager", fullgraph=True)
def f(x):
x = x + 1
with torch._dynamo.error_on_graph_break(False):
torch._dynamo.graph_break()
return x + 2
with self.assertRaises(Unsupported):
f(inp)
def test_error_on_graph_break_empty_graph(self):
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend="eager")
def f():
return 1
self.assertEqual(f(), 1)
def test_error_on_graph_break_nonempty_checkpoint(self):
cnts = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnts)
def fn(x):
x = x + 1
x = x + 1
x = x + 1
with torch._dynamo.error_on_graph_break(True):
torch._dynamo.graph_break()
return x + 1
with self.assertRaises(Unsupported):
fn(torch.ones(3))
self.assertEqual(cnts.frame_count, 0)
def test_nested_compile_fullgraph(self):
# Test that fullgraph=True cannot be toggled back by fullgraph=False
inp = torch.ones(3)
@torch.compile(backend="eager", fullgraph=True)
def inner_f1(x):
torch._dynamo.graph_break()
return x + 1
@torch.compile(backend="eager", fullgraph=False)
def outer_f1(x):
return inner_f1(x)
with self.assertRaises(Unsupported):
outer_f1(inp)
@torch.compile(backend="eager", fullgraph=False)
def inner_f2(x):
torch._dynamo.graph_break()
return x + 1
@torch.compile(backend="eager", fullgraph=True)
def outer_f2(x):
return inner_f2(x)
with self.assertRaises(Unsupported):
outer_f2(inp)
def test_disable_recursive_flags(self):
class SimpleLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(4, 4)
def forward(self, inp):
return self.layer0(torch.sigmoid(inp))
class SimpleModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = SimpleLinear()
self.layer1 = torch.nn.Linear(4, 4)
def forward(self, inp):
z = self.layer0(torch.sin(inp))
return self.layer1(z)
for recursive_flag in [True, False]:
model = SimpleModel()
other_model = SimpleModel()
model.forward = torch._dynamo.disable(
model.forward,
recursive=recursive_flag,
)
self.assertEqual(
torch._dynamo.is_dynamo_disable_recursive(model.forward),
recursive_flag,
)
other_model = torch._dynamo.disable(other_model, recursive=recursive_flag)
self.assertEqual(
torch._dynamo.is_dynamo_disable_recursive(
other_model.forward
if isinstance(other_model, torch.nn.Module)
else other_model
),
recursive_flag,
)
# check the model is compilable
torch.compile(model)
torch.compile(other_model)
def test_dynamo_disable_annotations(self):
class SimpleModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.register_buffer("buffer", torch.rand(2, 2))
@torch._dynamo.disable()
def f1(self, x) -> torch.Tensor:
return x + self.buffer + 1
@torch._dynamo.disable()
def f2(self, x) -> torch.Tensor:
return x + self.buffer + 2
def forward(self, x) -> torch.Tensor:
return self.f1(x) + self.f2(x)
model = SimpleModel()
inp = torch.rand(2, 2)
with torch.fx.traceback.preserve_node_meta():
exported_model = torch.export.export(model, (inp,))
graph = exported_model.graph_module.graph
found_f1 = False
found_f2 = False
for node in graph.nodes:
if "custom" in node.meta:
if "_torchdynamo_disable_method" in node.meta["custom"]:
if node.meta["custom"]["_torchdynamo_disable_method"] == "f1":
found_f1 = True
elif node.meta["custom"]["_torchdynamo_disable_method"] == "f2":
found_f2 = True
self.assertTrue(found_f1)
self.assertTrue(found_f2)
model.forward = torch._dynamo.disable(model.forward, recursive=False)
with self.assertRaises(RuntimeError):
exported_model = torch.export.export(model, (inp,))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| DecoratorTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1061370,
"end": 1062135
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'auto_merge_enabled' event on a given pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "enabler", "pull_request")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
enabler = sgqlc.types.Field("User", graphql_name="enabler")
"""The user who enabled auto-merge for this Pull Request"""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""PullRequest referenced by event."""
| AutoMergeEnabledEvent |
python | ethereum__web3.py | web3/_utils/threads.py | {
"start": 2595,
"end": 3352
} | class ____(threading.Thread, Generic[TReturn]):
def __init__(
self,
target: Callable[..., TReturn] = None,
args: Any = None,
kwargs: Any = None,
) -> None:
super().__init__(
target=target,
args=args or tuple(),
kwargs=kwargs or {},
)
self.target = target
self.args = args
self.kwargs = kwargs
def run(self) -> None:
self._return = self.target(*self.args, **self.kwargs)
def get(self, timeout: float = None) -> TReturn:
self.join(timeout)
try:
return self._return
except AttributeError:
raise RuntimeError("Something went wrong. No `_return` property was set")
| ThreadWithReturn |
python | kamyu104__LeetCode-Solutions | Python/subarrays-with-xor-at-least-k.py | {
"start": 2427,
"end": 4250
} | class ____(object):
def countXorSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
class Trie(object):
def __init__(self, bit_length):
self.__nodes = []
self.__cnts = []
self.__new_node()
self.__bit_length = bit_length
def __new_node(self):
self.__nodes.append([-1]*2)
self.__cnts.append(0)
return len(self.__nodes)-1
def add(self, num):
curr = 0
for i in reversed(xrange(self.__bit_length)):
x = (num>>i)&1
if self.__nodes[curr][x] == -1:
self.__nodes[curr][x] = self.__new_node()
curr = self.__nodes[curr][x]
self.__cnts[curr] += 1
def query(self, prefix, k):
result = curr = 0
for i in reversed(xrange(self.__bit_length)):
t = (k>>i)&1
x = (prefix>>i)&1
if t == 0:
tmp = self.__nodes[curr][1^x]
if tmp != -1:
result += self.__cnts[tmp]
curr = self.__nodes[curr][t^x]
if curr == -1:
break
else:
result += self.__cnts[curr]
return result
result = prefix = 0
mx = max(max(nums), k, 1)
trie = Trie(mx.bit_length())
trie.add(prefix)
for x in nums:
prefix ^= x
result += trie.query(prefix, k)
trie.add(prefix)
return result
| Solution_TLE |
python | ray-project__ray | python/ray/data/namespace_expressions/dt_namespace.py | {
"start": 521,
"end": 3208
} | class ____:
"""Datetime namespace for operations on datetime-typed expression columns."""
_expr: "Expr"
def _unary_temporal_int(
self, func: Callable[[pyarrow.Array], pyarrow.Array]
) -> "UDFExpr":
"""Helper for year/month/… that return int32."""
@pyarrow_udf(return_dtype=DataType.int32())
def _udf(arr: pyarrow.Array) -> pyarrow.Array:
return func(arr)
return _udf(self._expr)
# extractors
def year(self) -> "UDFExpr":
"""Extract year component."""
return self._unary_temporal_int(pc.year)
def month(self) -> "UDFExpr":
"""Extract month component."""
return self._unary_temporal_int(pc.month)
def day(self) -> "UDFExpr":
"""Extract day component."""
return self._unary_temporal_int(pc.day)
def hour(self) -> "UDFExpr":
"""Extract hour component."""
return self._unary_temporal_int(pc.hour)
def minute(self) -> "UDFExpr":
"""Extract minute component."""
return self._unary_temporal_int(pc.minute)
def second(self) -> "UDFExpr":
"""Extract second component."""
return self._unary_temporal_int(pc.second)
# formatting
def strftime(self, fmt: str) -> "UDFExpr":
"""Format timestamps with a strftime pattern."""
@pyarrow_udf(return_dtype=DataType.string())
def _format(arr: pyarrow.Array) -> pyarrow.Array:
return pc.strftime(arr, format=fmt)
return _format(self._expr)
# rounding
def ceil(self, unit: TemporalUnit) -> "UDFExpr":
"""Ceil timestamps to the next multiple of the given unit."""
return_dtype = self._expr.data_type
@pyarrow_udf(return_dtype=return_dtype)
def _ceil(arr: pyarrow.Array) -> pyarrow.Array:
return pc.ceil_temporal(arr, multiple=1, unit=unit)
return _ceil(self._expr)
def floor(self, unit: TemporalUnit) -> "UDFExpr":
"""Floor timestamps to the previous multiple of the given unit."""
return_dtype = self._expr.data_type
@pyarrow_udf(return_dtype=return_dtype)
def _floor(arr: pyarrow.Array) -> pyarrow.Array:
return pc.floor_temporal(arr, multiple=1, unit=unit)
return _floor(self._expr)
def round(self, unit: TemporalUnit) -> "UDFExpr":
"""Round timestamps to the nearest multiple of the given unit."""
return_dtype = self._expr.data_type
@pyarrow_udf(return_dtype=return_dtype)
def _round(arr: pyarrow.Array) -> pyarrow.Array:
return pc.round_temporal(arr, multiple=1, unit=unit)
return _round(self._expr)
| _DatetimeNamespace |
python | PrefectHQ__prefect | src/integrations/prefect-shell/prefect_shell/commands.py | {
"start": 4226,
"end": 6450
} | class ____(JobRun[list[str]]):
"""
A class representing a shell process.
"""
def __init__(self, shell_operation: "ShellOperation", process: Process):
self._shell_operation = shell_operation
self._process = process
self._output: list[str] = []
@property
def pid(self) -> int:
"""
The PID of the process.
Returns:
The PID of the process.
"""
return self._process.pid
@property
def return_code(self) -> int | None:
"""
The return code of the process.
Returns:
The return code of the process, or `None` if the process is still running.
"""
return self._process.returncode
async def _capture_output(self, source: Any):
"""
Capture output from source.
"""
async for output in TextReceiveStream(source):
text = output.rstrip()
if self._shell_operation.stream_output:
self.logger.info(f"PID {self.pid} stream output:{os.linesep}{text}")
self._output.extend(text.split(os.linesep))
@sync_compatible
async def wait_for_completion(self) -> None:
"""
Wait for the shell command to complete after a process is triggered.
"""
self.logger.debug(f"Waiting for PID {self.pid} to complete.")
await asyncio.gather(
self._capture_output(self._process.stdout),
self._capture_output(self._process.stderr),
)
await self._process.wait()
if self.return_code != 0:
raise RuntimeError(
f"PID {self.pid} failed with return code {self.return_code}."
)
self.logger.info(
f"PID {self.pid} completed with return code {self.return_code}."
)
@sync_compatible
async def fetch_result(self) -> list[str]:
"""
Retrieve the output of the shell operation.
Returns:
The lines output from the shell operation as a list.
"""
if self._process.returncode is None:
self.logger.info("Process is still running, result may be incomplete.")
return self._output
| ShellProcess |
python | getsentry__sentry | src/sentry/integrations/gitlab/client.py | {
"start": 2509,
"end": 16048
} | class ____(IntegrationProxyClient, RepositoryClient, CommitContextClient):
def __init__(self, installation: GitlabIntegration):
self.installation = installation
verify_ssl = self.metadata["verify_ssl"]
self.is_refreshing_token = False
self.refreshed_identity: RpcIdentity | None = None
self.base_url = self.metadata["base_url"]
org_integration_id = installation.org_integration.id
self.integration_name = IntegrationProviderSlug.GITLAB
super().__init__(
integration_id=installation.model.id,
org_integration_id=org_integration_id,
verify_ssl=verify_ssl,
)
@property
def identity(self) -> RpcIdentity:
if self.refreshed_identity:
return self.refreshed_identity
return self.installation.default_identity
@property
def metadata(self):
return self.installation.model.metadata
def build_url(self, path: str) -> str:
path = GitLabApiClientPath.build_api_url(self.base_url, path)
path = super().build_url(path=path)
return path
@control_silo_function
def authorize_request(self, prepared_request: PreparedRequest) -> PreparedRequest:
access_token = self.identity.data["access_token"]
prepared_request.headers["Authorization"] = f"Bearer {access_token}"
return prepared_request
def _refresh_auth(self):
"""
Modeled after Doorkeeper's docs
where Doorkeeper is a dependency for GitLab that handles OAuth
https://github.com/doorkeeper-gem/doorkeeper/wiki/Enable-Refresh-Token-Credentials#testing-with-oauth2-gem
"""
return self.identity.get_identity().refresh_identity(
self.identity,
refresh_token_url="{}{}".format(
self.base_url.rstrip("/"), GitLabApiClientPath.oauth_token
),
verify_ssl=self.metadata["verify_ssl"],
)
def request(self, *args: Any, **kwargs: Any):
if SiloMode.get_current_mode() == SiloMode.REGION:
# Skip token refreshes in Region silo, as these will
# be handled below by the control silo when the
# integration proxy invokes the client code.
return super().request(*args, **kwargs)
return self._issue_request_with_auto_token_refresh(*args, **kwargs)
def _issue_request_with_auto_token_refresh(self, *args: Any, **kwargs: Any):
try:
response = super().request(*args, **kwargs)
except ApiUnauthorized:
if self.is_refreshing_token:
raise
return self._attempt_request_after_refreshing_token(*args, **kwargs)
if (
kwargs.get("raw_response", False)
and response.status_code == 401
and not self.is_refreshing_token
):
# Because the caller may make the request with the raw_response
# option, we need to manually check the response status code and
# refresh the token if an auth error occurs.
return self._attempt_request_after_refreshing_token(*args, **kwargs)
return response
def _attempt_request_after_refreshing_token(self, *args: Any, **kwargs: Any):
assert not self.is_refreshing_token, "A token refresh is already occurring"
self.is_refreshing_token = True
self.refreshed_identity = self._refresh_auth()
response = super().request(*args, **kwargs)
self.is_refreshing_token = False
self.refreshed_identity = None
return response
def get_user(self):
"""Get a user
See https://docs.gitlab.com/ee/api/users.html#single-user
"""
return self.get(GitLabApiClientPath.user)
def search_projects(self, group=None, query=None, simple=True):
"""Get projects
See https://docs.gitlab.com/ee/api/groups.html#list-a-group-s-projects
and https://docs.gitlab.com/ee/api/projects.html#list-all-projects
"""
def gen_params(page_number, page_size):
# Simple param returns limited fields for the project.
# Really useful, because we often don't need most of the project information
params = {
"search": query,
"simple": simple,
"order_by": "last_activity_at",
"page": page_number + 1, # page starts at 1
"per_page": page_size,
}
if group:
extra_params = {"include_subgroups": self.metadata.get("include_subgroups", False)}
else:
extra_params = {"membership": True}
params.update(extra_params)
return params
def get_results(resp):
return resp
if group:
path = GitLabApiClientPath.group_projects.format(group=group)
else:
path = GitLabApiClientPath.projects
return self.get_with_pagination(path, gen_params, get_results)
def get_project(self, project_id):
"""Get project
See https://docs.gitlab.com/ee/api/projects.html#get-single-project
"""
return self.get(GitLabApiClientPath.project.format(project=project_id))
def get_issue(self, project_id, issue_id):
"""Get an issue
See https://docs.gitlab.com/ee/api/issues.html#single-issue
"""
try:
return self.get(GitLabApiClientPath.issue.format(project=project_id, issue=issue_id))
except IndexError:
raise ApiError("Issue not found with ID", 404)
def create_issue(self, project, data):
"""Create an issue
See https://docs.gitlab.com/ee/api/issues.html#new-issue
"""
return self.post(GitLabApiClientPath.issues.format(project=project), data=data)
def create_comment(self, repo: str, issue_id: str, data: dict[str, Any]):
"""Create an issue note/comment
See https://docs.gitlab.com/ee/api/notes.html#create-new-issue-note
"""
return self.post(
GitLabApiClientPath.create_issue_note.format(project=repo, issue_id=issue_id), data=data
)
def update_comment(self, repo: str, issue_id: str, comment_id: str, data: dict[str, Any]):
"""Modify existing issue note
See https://docs.gitlab.com/ee/api/notes.html#modify-existing-issue-note
"""
return self.put(
GitLabApiClientPath.update_issue_note.format(
project=repo, issue_id=issue_id, note_id=comment_id
),
data=data,
)
def create_pr_comment(self, repo: Repository, pr: PullRequest, data: dict[str, Any]) -> Any:
project_id = repo.config["project_id"]
url = GitLabApiClientPath.create_pr_note.format(project=project_id, pr_key=pr.key)
return self.post(url, data=data)
def update_pr_comment(
self,
repo: Repository,
pr: PullRequest,
pr_comment: PullRequestComment,
data: dict[str, Any],
) -> Any:
project_id = repo.config["project_id"]
url = GitLabApiClientPath.update_pr_note.format(
project=project_id, pr_key=pr.key, note_id=pr_comment.external_id
)
return self.put(url, data=data)
def search_project_issues(self, project_id, query, iids=None):
"""Search issues in a project
See https://docs.gitlab.com/ee/api/issues.html#list-project-issues
"""
path = GitLabApiClientPath.project_issues.format(project=project_id)
return self.get(path, params={"scope": "all", "search": query, "iids": iids})
def create_project_webhook(self, project_id):
"""Create a webhook on a project
See https://docs.gitlab.com/ee/api/projects.html#add-project-hook
"""
path = GitLabApiClientPath.project_hooks.format(project=project_id)
hook_uri = reverse("sentry-extensions-gitlab-webhook")
model = self.installation.model
data = {
"url": absolute_uri(hook_uri),
"token": "{}:{}".format(model.external_id, model.metadata["webhook_secret"]),
"merge_requests_events": True,
"push_events": True,
"enable_ssl_verification": model.metadata["verify_ssl"],
}
resp = self.post(path, data=data)
return resp["id"]
def delete_project_webhook(self, project_id, hook_id):
"""Delete a webhook from a project
See https://docs.gitlab.com/ee/api/projects.html#delete-project-hook
"""
path = GitLabApiClientPath.project_hook.format(project=project_id, hook_id=hook_id)
return self.delete(path)
def get_last_commits(self, project_id, end_sha):
"""Get the last set of commits ending at end_sha
Gitlab doesn't give us a good way to do this, so we fetch the end_sha
and use its date to find the block of commits. We only fetch one page
of commits to match other implementations (github, bitbucket)
See https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit and
https://docs.gitlab.com/ee/api/commits.html#list-repository-commits
"""
path = GitLabApiClientPath.commit.format(project=project_id, sha=end_sha)
commit = self.get(path)
if not commit:
return []
end_date = commit["created_at"]
path = GitLabApiClientPath.commits.format(project=project_id)
return self.get(path, params={"until": end_date})
def get_commit(self, project_id, sha):
"""
Get the details of a commit
See https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit
"""
return self.get_cached(GitLabApiClientPath.commit.format(project=project_id, sha=sha))
def get_merge_commit_sha_from_commit(self, repo: Repository, sha: str) -> str | None:
"""
Get the merge commit sha from a commit sha
See https://docs.gitlab.com/api/commits/#list-merge-requests-associated-with-a-commit
"""
project_id = repo.config["project_id"]
path = GitLabApiClientPath.commit_merge_requests.format(project=project_id, sha=sha)
response = self.get(path)
# Filter out non-merged merge requests
merge_requests = []
for merge_request in response:
if merge_request["state"] == "merged":
merge_requests.append(merge_request)
if len(merge_requests) != 1:
# the response should return a single merged PR, returning None if multiple
return None
merge_request = merge_requests[0]
return merge_request["merge_commit_sha"] or merge_request["squash_commit_sha"]
def compare_commits(self, project_id, start_sha, end_sha):
"""Compare commits between two SHAs
See https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
"""
path = GitLabApiClientPath.compare.format(project=project_id)
return self.get(path, params={"from": start_sha, "to": end_sha})
def get_diff(self, project_id, sha):
"""Get the diff for a commit
See https://docs.gitlab.com/ee/api/commits.html#get-the-diff-of-a-commit
"""
path = GitLabApiClientPath.diff.format(project=project_id, sha=sha)
return self.get(path)
def check_file(self, repo: Repository, path: str, version: str | None) -> object | None:
"""Fetch a file for stacktrace linking
See https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
Path requires file path and ref
file_path must also be URL encoded Ex. lib%2Fclass%2Erb
"""
project_id = repo.config["project_id"]
encoded_path = quote(path, safe="")
request_path = GitLabApiClientPath.file.format(project=project_id, path=encoded_path)
# Gitlab can return 404 or 400 if the file doesn't exist
return self.head_cached(request_path, params={"ref": version})
def get_file(
self, repo: Repository, path: str, ref: str | None, codeowners: bool = False
) -> str:
"""Get the contents of a file
See https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
Path requires file path and ref
file_path must also be URL encoded Ex. lib%2Fclass%2Erb
"""
project_id = repo.config["project_id"]
encoded_path = quote(path, safe="")
request_path = GitLabApiClientPath.file_raw.format(project=project_id, path=encoded_path)
contents = self.get(request_path, params={"ref": ref}, raw_response=True)
result = contents.content.decode("utf-8")
return result
def get_blame_for_files(
self, files: Sequence[SourceLineInfo], extra: Mapping[str, Any]
) -> list[FileBlameInfo]:
metrics.incr("sentry.integrations.gitlab.get_blame_for_files")
return fetch_file_blames(
self,
files,
extra={
**extra,
"provider": IntegrationProviderSlug.GITLAB.value,
"org_integration_id": self.org_integration_id,
},
)
def get_pr_diffs(self, repo: Repository, pr: PullRequest) -> list[dict[str, Any]]:
project_id = repo.config["project_id"]
path = GitLabApiClientPath.build_pr_diffs(project=project_id, pr_key=pr.key, unidiff=True)
return self.get(path)
| GitLabApiClient |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 3225,
"end": 3701
} | class ____(AirflowException):
"""Raise when a DAG's ID is already used by another DAG."""
def __init__(self, dag_id: str, incoming: str, existing: str) -> None:
super().__init__(dag_id, incoming, existing)
self.dag_id = dag_id
self.incoming = incoming
self.existing = existing
def __str__(self) -> str:
return f"Ignoring DAG {self.dag_id} from {self.incoming} - also found in {self.existing}"
| AirflowDagDuplicatedIdException |
python | kamyu104__LeetCode-Solutions | Python/maximum-palindromes-after-operations.py | {
"start": 68,
"end": 525
} | class ____(object):
def maxPalindromesAfterOperations(self, words):
"""
:type words: List[str]
:rtype: int
"""
cnt = [0]*26
for w in words:
for c in w:
cnt[ord(c)-ord('a')] += 1
curr = sum(x//2 for x in cnt)
for i, l in enumerate(sorted(map(len, words))):
curr -= l//2
if curr < 0:
return i
return len(words)
| Solution |
python | davidhalter__parso | parso/python/errors.py | {
"start": 44078,
"end": 45338
} | class ____(_CheckAssignmentRule):
message = "illegal expression for augmented assignment"
extended_message = "'{target}' is an " + message
def is_issue(self, node):
augassign = node.children[1]
is_aug_assign = augassign != '=' and augassign.type != 'annassign'
if self._normalizer.version <= (3, 8) or not is_aug_assign:
for before_equal in node.children[:-2:2]:
self._check_assignment(before_equal, is_aug_assign=is_aug_assign)
if is_aug_assign:
target = _remove_parens(node.children[0])
# a, a[b], a.b
if target.type == "name" or (
target.type in ("atom_expr", "power")
and target.children[1].type == "trailer"
and target.children[-1].children[0] != "("
):
return False
if self._normalizer.version <= (3, 8):
return True
else:
self.add_issue(
node,
message=self.extended_message.format(
target=_get_rhs_name(node.children[0], self._normalizer.version)
),
)
@ErrorFinder.register_rule(type='with_item')
| _ExprStmtRule |
python | django-haystack__django-haystack | test_haystack/mocks.py | {
"start": 2989,
"end": 3246
} | class ____(MockSearchBackend):
model_name = "charpkmockmodel"
mock_search_results = [
MockSearchResult("core", "CharPKMockModel", "sometext", 0.5),
MockSearchResult("core", "CharPKMockModel", "1234", 0.3),
]
| CharPKMockSearchBackend |
python | numpy__numpy | numpy/f2py/tests/test_data.py | {
"start": 1749,
"end": 2124
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "crackfortran", "data_common.f")]
# For gh-23276
def test_data_stmts(self):
assert self.module.mycom.mydata == 0
def test_crackedlines(self):
mod = crackfortran(str(self.sources[0]))
print(mod[0]['vars'])
assert mod[0]['vars']['mydata']['='] == '0'
| TestDataF77 |
python | getsentry__sentry | src/sentry/tasks/auth/auth.py | {
"start": 3873,
"end": 6769
} | class ____(abc.ABC):
"""Remove members who don't comply with a new org requirement."""
log_label = ""
@abc.abstractmethod
def is_compliant(self, user: RpcUser) -> bool:
"""Check whether a member complies with the new requirement."""
raise NotImplementedError()
@abc.abstractmethod
def call_to_action(self, org: Organization, user: RpcUser, member: OrganizationMember):
"""Prompt a member to comply with the new requirement."""
raise NotImplementedError()
def remove_non_compliant_members(
self, org_id: int, actor_id: int | None, actor_key_id: int | None, ip_address: str | None
):
actor = user_service.get_user(user_id=actor_id) if actor_id else None
def remove_member(org_member: OrganizationMember, user: RpcUser):
logging_data = {
"organization_id": org_id,
"user_id": user.id,
"member_id": org_member.id,
}
removed_member = organization_service.remove_user(
organization_id=org_id, user_id=user.id
)
if removed_member is None:
logger.warning(
"Could not remove %s noncompliant user from org",
self.log_label,
extra=logging_data,
)
else:
logger.info(
"%s noncompliant user removed from org", self.log_label, extra=logging_data
)
create_audit_entry_from_user(
user=actor,
actor_key_id=actor_key_id,
ip_address=ip_address,
event=audit_log.get_event_id("MEMBER_PENDING"),
data=org_member.get_audit_log_data(),
organization_id=org_id,
target_object=org_id,
target_user_id=user.id,
)
# Refresh the org member to ensure we always properly generate an invite link
org_member.refresh_from_db()
org = Organization.objects.get_from_cache(id=org_id)
self.call_to_action(org, user, org_member)
org_members = OrganizationMember.objects.filter(
organization_id=org_id, user_id__isnull=False
)
rpc_users = user_service.get_many_by_id(
ids=[member.user_id for member in org_members if member.user_id is not None]
)
rpc_users_dict = {user.id: user for user in rpc_users}
for member in org_members:
if member.user_id is None:
continue
user = rpc_users_dict.get(member.user_id, None)
if user is None:
continue
if not self.is_compliant(user):
remove_member(org_member=member, user=user)
| OrganizationComplianceTask |
python | huggingface__transformers | tests/quantization/gptq/test_gptq.py | {
"start": 11828,
"end": 13023
} | class ____(GPTQTest):
device_map = {"": 0}
def test_change_loading_attributes(self):
"""
Test the serialization of the model and the loading of the quantized weights works with another config file
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
if is_auto_gptq_available() and not is_gptqmodel_available() and not self.use_exllama:
self.check_quantized_layers_type(self.quantized_model, "cuda-old")
# we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map=self.device_map
)
self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits)
self.check_quantized_layers_type(quantized_model_from_saved, "exllama")
self.check_inference_correctness(quantized_model_from_saved)
@require_accelerate
@require_torch_multi_gpu
| GPTQTestCUDA |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin_ini/plugin_fail.py | {
"start": 6133,
"end": 6356
} | class ____(BaseModel):
x: int = 1
y = 2
# MYPY: error: Untyped fields disallowed [pydantic-field]
z = 2 # type: ignore[pydantic-field]
AliasGeneratorModel2(x=1)
AliasGeneratorModel2(y=1, z=1)
| UntypedFieldModel |
python | ray-project__ray | python/ray/data/tests/test_dynamic_block_split.py | {
"start": 782,
"end": 3530
} | class ____(Datasource):
def __init__(
self,
num_tasks: int,
num_batches_per_task: int,
row_size: int,
num_rows_per_batch=None,
use_bytes=True,
use_arrow=False,
):
self.num_tasks = num_tasks
self.num_batches_per_task = num_batches_per_task
self.row_size = row_size
if num_rows_per_batch is None:
num_rows_per_batch = 1
self.num_rows_per_batch = num_rows_per_batch
self.use_bytes = use_bytes
self.use_arrow = use_arrow
def estimate_inmemory_data_size(self):
return None
def get_read_tasks(
self, parallelism: int, per_task_row_limit: Optional[int] = None
) -> List[ReadTask]:
def _blocks_generator():
for _ in range(self.num_batches_per_task):
if self.use_bytes:
# NOTE(swang): Each np object has some metadata bytes, so
# actual size can be much more than num_rows_per_batch * row_size
# if row_size is small.
yield pd.DataFrame(
{
"one": [
np.random.bytes(self.row_size)
for _ in range(self.num_rows_per_batch)
]
}
)
elif self.use_arrow:
batch = {
"one": np.ones(
(self.num_rows_per_batch, self.row_size), dtype=np.uint8
)
}
block = ArrowBlockBuilder._table_from_pydict(batch)
yield block
else:
yield pd.DataFrame(
{
"one": [
np.array2string(np.ones(self.row_size, dtype=int))
for _ in range(self.num_rows_per_batch)
]
}
)
return self.num_tasks * [
ReadTask(
lambda: _blocks_generator(),
BlockMetadata(
num_rows=self.num_batches_per_task * self.num_rows_per_batch,
size_bytes=self.num_batches_per_task
* self.num_rows_per_batch
* self.row_size,
input_files=None,
exec_stats=None,
),
per_task_row_limit=per_task_row_limit,
)
]
def num_rows(self) -> int:
return self.num_tasks * self.num_batches_per_task * self.num_rows_per_batch
| RandomBytesDatasource |
python | OmkarPathak__pygorithm | pygorithm/data_structures/linked_list.py | {
"start": 3400,
"end": 5717
} | class ____(object):
"""DoublyLinkedList
DoublyLinkedList Class
"""
def __init__(self):
"""
constructor
"""
self.head = None
def get_data(self):
"""
prints the elements in the linked list
"""
temp = self.head
l_list = []
while temp:
l_list.append(temp.data)
temp = temp.next
return l_list
def insert_at_start(self, data):
"""
insert an element at the beginning of the linked list
"""
if self.head is None:
self.head = Node(data)
else:
new_node = Node(data)
self.head.previous = new_node
new_node.next = self.head
self.head = new_node
def insert_at_end(self, data):
"""
insert an element at the end of the linked list
"""
new_node = Node(data)
temp = self.head
while temp.next is not None:
temp = temp.next
temp.next = new_node
new_node.previous = temp
def delete(self, data):
"""
to delete specified element from the linked list
"""
temp = self.head
if temp.next is not None:
# if head node is to be deleted
if temp.data == data:
temp.next.previous = None
self.head = temp.next
temp.next = None
return
else:
while temp.next is not None:
if temp.data == data:
break
temp = temp.next
if temp.next:
# if element to be deleted is in between
temp.previous.next = temp.next
temp.next.previous = temp.previous
temp.next = None
temp.previous = None
else:
# if element to be deleted is the last element
temp.previous.next = None
temp.previous = None
return
if temp is None:
return
@staticmethod
def get_code():
"""
returns the code of the current class
"""
return inspect.getsource(DoublyLinkedList)
| DoublyLinkedList |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.