language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
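Each row pairs a class definition whose name has been masked with "____" (source) against the original name (target), with class_span giving the character offsets of the class inside the file at path. A minimal sketch of consuming one row, assuming it is available as a plain Python dict; the values are copied from the first record below, and the loading mechanism itself is not specified here.

record = {
    "language": "python",
    "repo": "python-excel__xlrd",
    "path": "xlrd/xldate.py",
    "class_span": {"start": 1080, "end": 1165},
    "source": 'class ____(ValueError): "A base class for all datetime-related errors."',
    "target": "XLDateError",
}

# The task implied by the columns: predict `target`, then substitute it for the
# "____" placeholder to recover the original class definition.
reconstructed = record["source"].replace("____", record["target"], 1)
print(reconstructed)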
python
python-excel__xlrd
xlrd/xldate.py
{ "start": 1080, "end": 1165 }
class ____(ValueError): "A base class for all datetime-related errors."
XLDateError
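A hedged usage sketch for the class above: XLDateError is the base for xlrd's date-conversion errors, so catching it also covers the more specific subclasses. The xldate_as_tuple call and the exact subclass raised are assumptions about xlrd's public API, not taken from this record.

import xlrd.xldate

try:
    # Negative Excel serial dates are invalid; datemode 0 selects the 1900 date system.
    xlrd.xldate.xldate_as_tuple(-1.0, 0)
except xlrd.xldate.XLDateError as exc:
    print(type(exc).__name__, exc)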
python
pypa__pipenv
pipenv/vendor/click/types.py
{ "start": 25137, "end": 31872 }
class ____(ParamType): """The ``Path`` type is similar to the :class:`File` type, but returns the filename instead of an open file. Various checks can be enabled to validate the type of file and permissions. :param exists: The file or directory needs to exist for the value to be valid. If this is not set to ``True``, and the file does not exist, then all further checks are silently skipped. :param file_okay: Allow a file as a value. :param dir_okay: Allow a directory as a value. :param readable: if true, a readable check is performed. :param writable: if true, a writable check is performed. :param executable: if true, an executable check is performed. :param resolve_path: Make the value absolute and resolve any symlinks. A ``~`` is not expanded, as this is supposed to be done by the shell only. :param allow_dash: Allow a single dash as a value, which indicates a standard stream (but does not open it). Use :func:`~click.open_file` to handle opening this value. :param path_type: Convert the incoming path value to this type. If ``None``, keep Python's default, which is ``str``. Useful to convert to :class:`pathlib.Path`. .. versionchanged:: 8.1 Added the ``executable`` parameter. .. versionchanged:: 8.0 Allow passing ``path_type=pathlib.Path``. .. versionchanged:: 6.0 Added the ``allow_dash`` parameter. """ envvar_list_splitter: t.ClassVar[str] = os.path.pathsep def __init__( self, exists: bool = False, file_okay: bool = True, dir_okay: bool = True, writable: bool = False, readable: bool = True, resolve_path: bool = False, allow_dash: bool = False, path_type: t.Optional[t.Type[t.Any]] = None, executable: bool = False, ): self.exists = exists self.file_okay = file_okay self.dir_okay = dir_okay self.readable = readable self.writable = writable self.executable = executable self.resolve_path = resolve_path self.allow_dash = allow_dash self.type = path_type if self.file_okay and not self.dir_okay: self.name: str = _("file") elif self.dir_okay and not self.file_okay: self.name = _("directory") else: self.name = _("path") def to_info_dict(self) -> t.Dict[str, t.Any]: info_dict = super().to_info_dict() info_dict.update( exists=self.exists, file_okay=self.file_okay, dir_okay=self.dir_okay, writable=self.writable, readable=self.readable, allow_dash=self.allow_dash, ) return info_dict def coerce_path_result( self, value: "t.Union[str, os.PathLike[str]]" ) -> "t.Union[str, bytes, os.PathLike[str]]": if self.type is not None and not isinstance(value, self.type): if self.type is str: return os.fsdecode(value) elif self.type is bytes: return os.fsencode(value) else: return t.cast("os.PathLike[str]", self.type(value)) return value def convert( self, value: "t.Union[str, os.PathLike[str]]", param: t.Optional["Parameter"], ctx: t.Optional["Context"], ) -> "t.Union[str, bytes, os.PathLike[str]]": rv = value is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") if not is_dash: if self.resolve_path: # os.path.realpath doesn't resolve symlinks on Windows # until Python 3.8. Use pathlib for now. 
import pathlib rv = os.fsdecode(pathlib.Path(rv).resolve()) try: st = os.stat(rv) except OSError: if not self.exists: return self.coerce_path_result(rv) self.fail( _("{name} {filename!r} does not exist.").format( name=self.name.title(), filename=format_filename(value) ), param, ctx, ) if not self.file_okay and stat.S_ISREG(st.st_mode): self.fail( _("{name} {filename!r} is a file.").format( name=self.name.title(), filename=format_filename(value) ), param, ctx, ) if not self.dir_okay and stat.S_ISDIR(st.st_mode): self.fail( _("{name} '{filename}' is a directory.").format( name=self.name.title(), filename=format_filename(value) ), param, ctx, ) if self.readable and not os.access(rv, os.R_OK): self.fail( _("{name} {filename!r} is not readable.").format( name=self.name.title(), filename=format_filename(value) ), param, ctx, ) if self.writable and not os.access(rv, os.W_OK): self.fail( _("{name} {filename!r} is not writable.").format( name=self.name.title(), filename=format_filename(value) ), param, ctx, ) if self.executable and not os.access(value, os.X_OK): self.fail( _("{name} {filename!r} is not executable.").format( name=self.name.title(), filename=format_filename(value) ), param, ctx, ) return self.coerce_path_result(rv) def shell_complete( self, ctx: "Context", param: "Parameter", incomplete: str ) -> t.List["CompletionItem"]: """Return a special completion marker that tells the completion system to use the shell to provide path completions for only directories or any paths. :param ctx: Invocation context for this command. :param param: The parameter that is requesting completion. :param incomplete: Value being completed. May be empty. .. versionadded:: 8.0 """ from pipenv.vendor.click.shell_completion import CompletionItem type = "dir" if self.dir_okay and not self.file_okay else "file" return [CompletionItem(incomplete, type=type)]
Path
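A hedged usage sketch for the Path type documented above, using the standalone click package rather than pipenv's vendored copy; the command name, argument names, and file names are illustrative only.

import pathlib

import click

@click.command()
@click.argument("src", type=click.Path(exists=True, dir_okay=False, readable=True))
@click.argument("dst", type=click.Path(writable=True, path_type=pathlib.Path))
def copy(src, dst):
    """Copy SRC to DST once the Path checks have validated both values."""
    # dst arrives as a pathlib.Path because of path_type; src stays a str.
    dst.write_bytes(pathlib.Path(src).read_bytes())

if __name__ == "__main__":
    copy()

Invoked as `copy input.txt output.txt`, a missing or unreadable SRC is rejected by the Path checks before the function body runs.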
python
dask__dask
dask/bag/core.py
{ "start": 12681, "end": 65067 }
class ____(DaskMethodsMixin): """Parallel collection of Python objects Examples -------- Create Bag from sequence >>> import dask.bag as db >>> b = db.from_sequence(range(5)) >>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) [0, 20, 40] Create Bag from filename or globstring of filenames >>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP Create manually (expert use) >>> dsk = {('x', 0): (range, 5), ... ('x', 1): (range, 5), ... ('x', 2): (range, 5)} >>> b = db.Bag(dsk, 'x', npartitions=3) >>> sorted(b.map(lambda x: x * 10)) [0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40] >>> int(b.fold(lambda x, y: x + y)) 30 """ def __init__(self, dsk: Graph, name: str, npartitions: int): if not isinstance(dsk, HighLevelGraph): dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[]) self.dask = dsk self.name = name self.npartitions = npartitions def __dask_graph__(self) -> Graph: return self.dask def __dask_keys__(self) -> NestedKeys: return [(self.name, i) for i in range(self.npartitions)] def __dask_layers__(self) -> Sequence[str]: return (self.name,) def __dask_tokenize__(self): return self.name __dask_optimize__ = globalmethod(optimize, key="bag_optimize", falsey=dont_optimize) __dask_scheduler__ = staticmethod(DEFAULT_GET) def __dask_postcompute__(self): return finalize, () def __dask_postpersist__(self): return self._rebuild, () def _rebuild(self, dsk, *, rename=None): name = self.name if rename: name = rename.get(name, name) return type(self)(dsk, name, self.npartitions) def __str__(self): return f"dask.bag<{key_split(self.name)}, npartitions={self.npartitions}>" __repr__ = __str__ str = property(fget=StringAccessor) def map(self, func, *args, **kwargs): """Apply a function elementwise across one or more bags. Note that all ``Bag`` arguments must be partitioned identically. Parameters ---------- func : callable *args, **kwargs : Bag, Item, or object Extra arguments and keyword arguments to pass to ``func`` *after* the calling bag instance. Non-Bag args/kwargs are broadcasted across all calls to ``func``. Notes ----- For calls with multiple `Bag` arguments, corresponding partitions should have the same length; if they do not, the call will error at compute time. Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(range(5), npartitions=2) >>> b2 = db.from_sequence(range(5, 10), npartitions=2) Apply a function to all elements in a bag: >>> b.map(lambda x: x + 1).compute() [1, 2, 3, 4, 5] Apply a function with arguments from multiple bags: >>> from operator import add >>> b.map(add, b2).compute() [5, 7, 9, 11, 13] Non-bag arguments are broadcast across all calls to the mapped function: >>> b.map(add, 1).compute() [1, 2, 3, 4, 5] Keyword arguments are also supported, and have the same semantics as regular arguments: >>> def myadd(x, y=0): ... return x + y >>> b.map(myadd, y=b2).compute() [5, 7, 9, 11, 13] >>> b.map(myadd, y=1).compute() [1, 2, 3, 4, 5] Both arguments and keyword arguments can also be instances of ``dask.bag.Item``. Here we'll add the max value in the bag to each element: >>> b.map(myadd, b.max()).compute() [4, 5, 6, 7, 8] """ return bag_map(func, self, *args, **kwargs) def starmap(self, func, **kwargs): """Apply a function using argument tuples from the given bag. This is similar to ``itertools.starmap``, except it also accepts keyword arguments. In pseudocode, this is could be written as: >>> def starmap(func, bag, **kwargs): ... 
return (func(*args, **kwargs) for args in bag) Parameters ---------- func : callable **kwargs : Item, Delayed, or object, optional Extra keyword arguments to pass to ``func``. These can either be normal objects, ``dask.bag.Item``, or ``dask.delayed.Delayed``. Examples -------- >>> import dask.bag as db >>> data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)] >>> b = db.from_sequence(data, npartitions=2) Apply a function to each argument tuple: >>> from operator import add >>> b.starmap(add).compute() [3, 7, 11, 15, 19] Apply a function to each argument tuple, with additional keyword arguments: >>> def myadd(x, y, z=0): ... return x + y + z >>> b.starmap(myadd, z=10).compute() [13, 17, 21, 25, 29] Keyword arguments can also be instances of ``dask.bag.Item`` or ``dask.delayed.Delayed``: >>> max_second = b.pluck(1).max() >>> max_second.compute() 10 >>> b.starmap(myadd, z=max_second).compute() [13, 17, 21, 25, 29] """ name = "{}-{}".format(funcname(func), tokenize(self, func, "starmap", **kwargs)) dependencies = [self] if kwargs: kwargs, collections = unpack_scalar_dask_kwargs(kwargs) dependencies.extend(collections) dsk = { (name, i): (reify, (starmap_chunk, func, (self.name, i), kwargs)) for i in range(self.npartitions) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies) return type(self)(graph, name, self.npartitions) @property def _args(self): return (self.dask, self.name, self.npartitions) def __getstate__(self): return self._args def __setstate__(self, state): self.dask, self.name, self.npartitions = state def filter(self, predicate): """Filter elements in collection by a predicate function. >>> def iseven(x): ... return x % 2 == 0 >>> import dask.bag as db >>> b = db.from_sequence(range(5)) >>> list(b.filter(iseven)) [0, 2, 4] """ name = f"filter-{funcname(predicate)}-{tokenize(self, predicate)}" dsk = { (name, i): (reify, (filter, predicate, (self.name, i))) for i in range(self.npartitions) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) return type(self)(graph, name, self.npartitions) def random_sample(self, prob, random_state=None): """Return elements from bag with probability of ``prob``. Parameters ---------- prob : float A float between 0 and 1, representing the probability that each element will be returned. random_state : int or random.Random, optional If an integer, will be used to seed a new ``random.Random`` object. If provided, results in deterministic sampling. Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(range(10)) >>> b.random_sample(0.5, 43).compute() [0, 1, 3, 4, 7, 9] >>> b.random_sample(0.5, 43).compute() [0, 1, 3, 4, 7, 9] """ if not 0 <= prob <= 1: raise ValueError("prob must be a number in the interval [0, 1]") if not isinstance(random_state, Random): random_state = Random(random_state) name = f"random-sample-{tokenize(self, prob, random_state.getstate())}" state_data = random_state_data_python(self.npartitions, random_state) dsk = { (name, i): (reify, (random_sample, (self.name, i), state, prob)) for i, state in zip(range(self.npartitions), state_data) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) return type(self)(graph, name, self.npartitions) def remove(self, predicate): """Remove elements in collection that match predicate. >>> def iseven(x): ... 
return x % 2 == 0 >>> import dask.bag as db >>> b = db.from_sequence(range(5)) >>> list(b.remove(iseven)) [1, 3] """ name = f"remove-{funcname(predicate)}-{tokenize(self, predicate)}" dsk = { (name, i): (reify, (remove, predicate, (self.name, i))) for i in range(self.npartitions) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) return type(self)(graph, name, self.npartitions) def map_partitions(self, func, *args, **kwargs): """Apply a function to every partition across one or more bags. Note that all ``Bag`` arguments must be partitioned identically. Parameters ---------- func : callable The function to be called on every partition. This function should expect an ``Iterator`` or ``Iterable`` for every partition and should return an ``Iterator`` or ``Iterable`` in return. *args, **kwargs : Bag, Item, Delayed, or object Arguments and keyword arguments to pass to ``func``. Partitions from this bag will be the first argument, and these will be passed *after*. Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(range(1, 101), npartitions=10) >>> def div(nums, den=1): ... return [num / den for num in nums] Using a python object: >>> hi = b.max().compute() >>> hi 100 >>> b.map_partitions(div, den=hi).take(5) (0.01, 0.02, 0.03, 0.04, 0.05) Using an ``Item``: >>> b.map_partitions(div, den=b.max()).take(5) (0.01, 0.02, 0.03, 0.04, 0.05) Note that while both versions give the same output, the second forms a single graph, and then computes everything at once, and in some cases may be more efficient. """ return map_partitions(func, self, *args, **kwargs) def pluck(self, key, default=no_default): """Select item from all tuples/dicts in collection. >>> import dask.bag as db >>> b = db.from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]}, ... {'name': 'Bob', 'credits': [10, 20]}]) >>> list(b.pluck('name')) ['Alice', 'Bob'] >>> list(b.pluck('credits').pluck(0)) [1, 10] """ name = "pluck-" + tokenize(self, key, default) key = quote(key) if default is no_default: dsk = { (name, i): (list, (pluck, key, (self.name, i))) for i in range(self.npartitions) } else: dsk = { (name, i): (list, (pluck, key, (self.name, i), default)) for i in range(self.npartitions) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) return type(self)(graph, name, self.npartitions) def unzip(self, n): """Transform a bag of tuples to ``n`` bags of their elements. 
Examples -------- >>> import dask.bag as db >>> b = db.from_sequence([(i, i + 1, i + 2) for i in range(10)]) >>> first, second, third = b.unzip(3) >>> isinstance(first, db.Bag) True >>> first.compute() [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] Note that this is equivalent to: >>> first, second, third = (b.pluck(i) for i in range(3)) """ return tuple(self.pluck(i) for i in range(n)) @wraps(to_textfiles) def to_textfiles( self, path, name_function=None, compression="infer", encoding=system_encoding, compute=True, storage_options=None, last_endline=False, **kwargs, ): return to_textfiles( self, path, name_function, compression, encoding, compute, storage_options=storage_options, last_endline=last_endline, **kwargs, ) @wraps(to_avro) def to_avro( self, filename, schema, name_function=None, storage_options=None, codec="null", sync_interval=16000, metadata=None, compute=True, **kwargs, ): return to_avro( self, filename, schema, name_function, storage_options, codec, sync_interval, metadata, compute, **kwargs, ) def fold( self, binop, combine=None, initial=no_default, split_every=None, out_type=Item ): """Parallelizable reduction Fold is like the builtin function ``reduce`` except that it works in parallel. Fold takes two binary operator functions, one to reduce each partition of our dataset and another to combine results between partitions 1. ``binop``: Binary operator to reduce within each partition 2. ``combine``: Binary operator to combine results from binop Sequentially this would look like the following: >>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP >>> final = reduce(combine, intermediates) # doctest: +SKIP If only one function is given then it is used for both functions ``binop`` and ``combine`` as in the following example to compute the sum: >>> def add(x, y): ... return x + y >>> import dask.bag as db >>> b = db.from_sequence(range(5)) >>> b.fold(add).compute() 10 In full form we provide both binary operators as well as their default arguments >>> b.fold(binop=add, combine=add, initial=0).compute() 10 More complex binary operators are also doable >>> def add_to_set(acc, x): ... ''' Add new element x to set acc ''' ... return acc | set([x]) >>> b.fold(add_to_set, set.union, initial=set()).compute() {0, 1, 2, 3, 4} See Also -------- Bag.foldby """ combine = combine or binop if initial is not no_default: return self.reduction( curry(_reduce, binop, initial=initial), curry(_reduce, combine), split_every=split_every, out_type=out_type, ) else: from tlz.curried import reduce return self.reduction( reduce(binop), reduce(combine), split_every=split_every, out_type=out_type, ) def frequencies(self, split_every=None, sort=False): """Count number of occurrences of each distinct element. 
>>> import dask.bag as db >>> b = db.from_sequence(['Alice', 'Bob', 'Alice']) >>> dict(b.frequencies()) # doctest: +SKIP {'Alice': 2, 'Bob', 1} """ result = self.reduction( frequencies, merge_frequencies, out_type=Bag, split_every=split_every, name="frequencies", ).map_partitions(dictitems) if sort: result = result.map_partitions(sorted, key=second, reverse=True) return result def topk(self, k, key=None, split_every=None): """K largest elements in collection Optionally ordered by some key function >>> import dask.bag as db >>> b = db.from_sequence([10, 3, 5, 7, 11, 4]) >>> list(b.topk(2)) [11, 10] >>> list(b.topk(2, lambda x: -x)) [3, 4] """ if key: if callable(key) and takes_multiple_arguments(key): key = partial(apply, key) func = partial(topk, k, key=key) else: func = partial(topk, k) return self.reduction( func, compose(func, toolz.concat), out_type=Bag, split_every=split_every, name="topk", ) def distinct(self, key=None): """Distinct elements of collection Unordered without repeats. Parameters ---------- key: {callable,str} Defines uniqueness of items in bag by calling ``key`` on each item. If a string is passed ``key`` is considered to be ``lambda x: x[key]``. Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(['Alice', 'Bob', 'Alice']) >>> sorted(b.distinct()) ['Alice', 'Bob'] >>> b = db.from_sequence([{'name': 'Alice'}, {'name': 'Bob'}, {'name': 'Alice'}]) >>> b.distinct(key=lambda x: x['name']).compute() [{'name': 'Alice'}, {'name': 'Bob'}] >>> b.distinct(key='name').compute() [{'name': 'Alice'}, {'name': 'Bob'}] """ func = chunk_distinct if key is None else partial(chunk_distinct, key=key) agg = merge_distinct if key is None else partial(merge_distinct, key=key) return self.reduction(func, agg, out_type=Bag, name="distinct") def reduction( self, perpartition, aggregate, split_every=None, out_type=Item, name=None ): """Reduce collection with reduction operators. Parameters ---------- perpartition: function reduction to apply to each partition aggregate: function reduction to apply to the results of all partitions split_every: int (optional) Group partitions into groups of this size while performing reduction Defaults to 8 out_type: {Bag, Item} The out type of the result, Item if a single element, Bag if a list of elements. Defaults to Item. 
Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(range(10)) >>> b.reduction(sum, sum).compute() 45 """ if split_every is None: split_every = 8 if split_every is False: split_every = self.npartitions token = tokenize(self, perpartition, aggregate, split_every) a = f"{name or funcname(perpartition)}-part-{token}" is_last = self.npartitions == 1 dsk = { (a, i): (empty_safe_apply, perpartition, (self.name, i), is_last) for i in range(self.npartitions) } k = self.npartitions b = a fmt = f"{name or funcname(aggregate)}-aggregate-{token}" depth = 0 while k > split_every: c = fmt + str(depth) for i, inds in enumerate(partition_all(split_every, range(k))): dsk[(c, i)] = ( empty_safe_aggregate, aggregate, [(b, j) for j in inds], False, ) k = i + 1 b = c depth += 1 dsk[(fmt, 0)] = ( empty_safe_aggregate, aggregate, [(b, j) for j in range(k)], True, ) graph = HighLevelGraph.from_collections(fmt, dsk, dependencies=[self]) if out_type is Item: dsk[fmt] = dsk.pop((fmt, 0)) return Item(graph, fmt) else: return Bag(graph, fmt, 1) def sum(self, split_every=None): """Sum all elements""" return self.reduction(sum, sum, split_every=split_every) def max(self, split_every=None): """Maximum element""" return self.reduction(max, max, split_every=split_every) def min(self, split_every=None): """Minimum element""" return self.reduction(min, min, split_every=split_every) def any(self, split_every=None): """Are any of the elements truthy? Examples -------- >>> import dask.bag as db >>> bool_bag = db.from_sequence([True, True, False]) >>> bool_bag.any().compute() True """ return self.reduction(any, any, split_every=split_every) def all(self, split_every=None): """Are all elements truthy? Examples -------- >>> import dask.bag as db >>> bool_bag = db.from_sequence([True, True, False]) >>> bool_bag.all().compute() False """ return self.reduction(all, all, split_every=split_every) def count(self, split_every=None): """Count the number of elements. Examples -------- >>> import dask.bag as db >>> numbers = db.from_sequence([1, 2, 3]) >>> numbers.count().compute() 3 """ return self.reduction(count, sum, split_every=split_every) def mean(self): """Arithmetic mean""" def mean_chunk(seq): total, n = 0.0, 0 for x in seq: total += x n += 1 return total, n def mean_aggregate(x): totals, counts = list(zip(*x)) return 1.0 * sum(totals) / sum(counts) return self.reduction(mean_chunk, mean_aggregate, split_every=False) def var(self, ddof=0): """Variance""" return self.reduction( chunk.var_chunk, partial(chunk.var_aggregate, ddof=ddof), split_every=False ) def std(self, ddof=0): """Standard deviation""" return self.var(ddof=ddof).apply(math.sqrt) def join(self, other, on_self, on_other=None): """Joins collection with another collection. Other collection must be one of the following: 1. An iterable. We recommend tuples over lists for internal performance reasons. 2. A delayed object, pointing to a tuple. This is recommended if the other collection is sizable and you're using the distributed scheduler. Dask is able to pass around data wrapped in delayed objects with greater sophistication. 3. A Bag with a single partition You might also consider Dask Dataframe, whose join operations are much more heavily optimized. 
Parameters ---------- other: Iterable, Delayed, Bag Other collection on which to join on_self: callable Function to call on elements in this collection to determine a match on_other: callable (defaults to on_self) Function to call on elements in the other collection to determine a match Examples -------- >>> import dask.bag as db >>> people = db.from_sequence(['Alice', 'Bob', 'Charlie']) >>> fruit = ['Apple', 'Apricot', 'Banana'] >>> list(people.join(fruit, lambda x: x[0])) [('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')] """ name = "join-" + tokenize(self, other, on_self, on_other) dsk = {} if isinstance(other, Bag): if other.npartitions == 1: dsk.update(other.dask) other = other.__dask_keys__()[0] dsk[f"join-{name}-other"] = (list, other) else: msg = ( "Multi-bag joins are not implemented. " "We recommend Dask dataframe if appropriate" ) raise NotImplementedError(msg) elif isinstance(other, Delayed): dsk.update(other.dask) other = other._key elif not isinstance(other, Iterable): msg = ( "Joined argument must be single-partition Bag, " f" delayed object, or Iterable, got {type(other).__name}" ) raise TypeError(msg) if on_other is None: on_other = on_self for i in range(self.npartitions): dsk[(name, i)] = (list, (join, on_other, other, on_self, (self.name, i))) graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) return type(self)(graph, name, self.npartitions) def product(self, other): """Cartesian product between two bags.""" assert isinstance(other, Bag) name = "product-" + tokenize(self, other) n, m = self.npartitions, other.npartitions dsk = { (name, i * m + j): ( list, (itertools.product, (self.name, i), (other.name, j)), ) for i in range(n) for j in range(m) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, other]) return type(self)(graph, name, n * m) def foldby( self, key, binop, initial=no_default, combine=None, combine_initial=no_default, split_every=None, ): """Combined reduction and groupby. Foldby provides a combined groupby and reduce for efficient parallel split-apply-combine tasks. The computation >>> b.foldby(key, binop, init) # doctest: +SKIP is equivalent to the following: >>> def reduction(group): # doctest: +SKIP ... return reduce(binop, group, init) # doctest: +SKIP >>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))# doctest: +SKIP But uses minimal communication and so is *much* faster. >>> import dask.bag as db >>> b = db.from_sequence(range(10)) >>> iseven = lambda x: x % 2 == 0 >>> add = lambda x, y: x + y >>> dict(b.foldby(iseven, add)) {True: 20, False: 25} **Key Function** The key function determines how to group the elements in your bag. In the common case where your bag holds dictionaries then the key function often gets out one of those elements. >>> def key(x): ... return x['name'] This case is so common that it is special cased, and if you provide a key that is not a callable function then dask.bag will turn it into one automatically. The following are equivalent: >>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP >>> b.foldby('name', ...) # doctest: +SKIP **Binops** It can be tricky to construct the right binary operators to perform analytic queries. The ``foldby`` method accepts two binary operators, ``binop`` and ``combine``. Binary operators two inputs and output must have the same type. Binop takes a running total and a new element and produces a new total: >>> def binop(total, x): ... 
return total + x['amount'] Combine takes two totals and combines them: >>> def combine(total1, total2): ... return total1 + total2 Each of these binary operators may have a default first value for total, before any other value is seen. For addition binary operators like above this is often ``0`` or the identity element for your operation. **split_every** Group partitions into groups of this size while performing reduction. Defaults to 8. >>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP Examples -------- We can compute the maximum of some ``(key, value)`` pairs, grouped by the ``key``. (You might be better off converting the ``Bag`` to a ``dask.dataframe`` and using its groupby). >>> import random >>> import dask.bag as db >>> tokens = list('abcdefg') >>> values = range(10000) >>> a = [(random.choice(tokens), random.choice(values)) ... for _ in range(100)] >>> a[:2] # doctest: +SKIP [('g', 676), ('a', 871)] >>> a = db.from_sequence(a) >>> def binop(t, x): ... return max((t, x), key=lambda x: x[1]) >>> a.foldby(lambda x: x[0], binop).compute() # doctest: +SKIP [('g', ('g', 984)), ('a', ('a', 871)), ('b', ('b', 999)), ('c', ('c', 765)), ('f', ('f', 955)), ('e', ('e', 991)), ('d', ('d', 854))] See Also -------- toolz.reduceby pyspark.combineByKey """ if split_every is None: split_every = 8 if split_every is False: split_every = self.npartitions token = tokenize(self, key, binop, initial, combine, combine_initial) a = "foldby-a-" + token if combine is None: combine = binop if initial is not no_default: dsk = { (a, i): (reduceby, key, binop, (self.name, i), initial) for i in range(self.npartitions) } else: dsk = { (a, i): (reduceby, key, binop, (self.name, i)) for i in range(self.npartitions) } combine2 = partial(chunk.foldby_combine2, combine) depth = 0 k = self.npartitions b = a while k > split_every: c = b + str(depth) if combine_initial is not no_default: for i, inds in enumerate(partition_all(split_every, range(k))): dsk[(c, i)] = ( reduceby, 0, combine2, (toolz.concat, (map, dictitems, [(b, j) for j in inds])), combine_initial, ) else: for i, inds in enumerate(partition_all(split_every, range(k))): dsk[(c, i)] = ( merge_with, (partial, reduce, combine), [(b, j) for j in inds], ) k = i + 1 b = c depth += 1 e = "foldby-b-" + token if combine_initial is not no_default: dsk[(e, 0)] = ( dictitems, ( reduceby, 0, combine2, (toolz.concat, (map, dictitems, [(b, j) for j in range(k)])), combine_initial, ), ) else: dsk[(e, 0)] = ( dictitems, (merge_with, (partial, reduce, combine), [(b, j) for j in range(k)]), ) graph = HighLevelGraph.from_collections(e, dsk, dependencies=[self]) return type(self)(graph, e, 1) def take(self, k, npartitions=1, compute=True, warn=True): """Take the first k elements. Parameters ---------- k : int The number of elements to return npartitions : int, optional Elements are only taken from the first ``npartitions``, with a default of 1. If there are fewer than ``k`` rows in the first ``npartitions`` a warning will be raised and any found rows returned. Pass -1 to use all partitions. compute : bool, optional Whether to compute the result, default is True. warn : bool, optional Whether to warn if the number of elements returned is less than requested, default is True. 
>>> import dask.bag as db >>> b = db.from_sequence(range(1_000)) >>> b.take(3) (0, 1, 2) """ if npartitions <= -1: npartitions = self.npartitions if npartitions > self.npartitions: raise ValueError( f"only {self.npartitions} partitions, take received {npartitions}" ) token = tokenize(self, k, npartitions) name = "take-" + token if npartitions > 1: name_p = "take-partial-" + token dsk = {} for i in range(npartitions): dsk[(name_p, i)] = (list, (take, k, (self.name, i))) concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)])) dsk[(name, 0)] = (safe_take, k, concat, warn) else: dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)} graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) b = Bag(graph, name, 1) if compute: return tuple(b.compute()) else: return b def flatten(self): """Concatenate nested lists into one long list. >>> import dask.bag as db >>> b = db.from_sequence([[1], [2, 3]]) >>> list(b) [[1], [2, 3]] >>> list(b.flatten()) [1, 2, 3] """ name = "flatten-" + tokenize(self) dsk = { (name, i): (list, (toolz.concat, (self.name, i))) for i in range(self.npartitions) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self]) return type(self)(graph, name, self.npartitions) def __iter__(self): return iter(self.compute()) def groupby( self, grouper, method=None, npartitions=None, blocksize=2**20, max_branch=None, shuffle=None, ): """Group collection by key function This requires a full dataset read, serialization and shuffle. This is expensive. If possible you should use ``foldby``. Parameters ---------- grouper: function Function on which to group elements shuffle: str Either 'disk' for an on-disk shuffle or 'tasks' to use the task scheduling framework. Use 'disk' if you are on a single machine and 'tasks' if you are on a distributed cluster. npartitions: int If using the disk-based shuffle, the number of output partitions blocksize: int If using the disk-based shuffle, the size of shuffle blocks (bytes) max_branch: int If using the task-based shuffle, the amount of splitting each partition undergoes. Increase this for fewer copies but more scheduler overhead. Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(range(10)) >>> iseven = lambda x: x % 2 == 0 >>> dict(b.groupby(iseven)) # doctest: +SKIP {True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]} See Also -------- Bag.foldby """ if method is not None: raise Exception("The method= keyword has been moved to shuffle=") if shuffle is None: try: shuffle = get_default_shuffle_method() except ImportError: shuffle = "tasks" else: if shuffle == "p2p": # Not implemented for Bags shuffle = "tasks" if shuffle == "disk": return groupby_disk( self, grouper, npartitions=npartitions, blocksize=blocksize ) elif shuffle == "tasks": return groupby_tasks(self, grouper, max_branch=max_branch) else: msg = "Shuffle must be 'disk' or 'tasks'" raise NotImplementedError(msg) def to_dataframe(self, meta=None, columns=None, optimize_graph=True): """Create Dask Dataframe from a Dask Bag. Bag should contain tuples, dict records, or scalars. Index will not be particularly meaningful. Use ``reindex`` afterwards if necessary. Parameters ---------- meta : pd.DataFrame, dict, iterable, optional An empty ``pd.DataFrame`` that matches the dtypes and column names of the output. This metadata is necessary for many algorithms in dask dataframe to work. For ease of use, some alternative inputs are also available. 
Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided. If not provided or a list, a single element from the first partition will be computed, triggering a potentially expensive call to ``compute``. This may lead to unexpected results, so providing ``meta`` is recommended. For more information, see ``dask.dataframe.utils.make_meta``. columns : sequence, optional Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). Note that if ``meta`` is provided, column names will be taken from there and this parameter is invalid. optimize_graph : bool, optional If True [default], the graph is optimized before converting into :class:`dask.dataframe.DataFrame`. Examples -------- >>> import dask.bag as db >>> b = db.from_sequence([{'name': 'Alice', 'balance': 100}, ... {'name': 'Bob', 'balance': 200}, ... {'name': 'Charlie', 'balance': 300}], ... npartitions=2) >>> df = b.to_dataframe() >>> df.compute() name balance 0 Alice 100 1 Bob 200 0 Charlie 300 """ import pandas as pd import dask.dataframe as dd if meta is None: head = self.take(1, warn=False) if len(head) == 0: raise ValueError( "`dask.bag.Bag.to_dataframe` failed to " "properly infer metadata, please pass in " "metadata via the `meta` keyword" ) meta = pd.DataFrame(list(head), columns=columns) elif columns is not None: raise ValueError("Can't specify both `meta` and `columns`") else: meta = dd.utils.make_meta(meta, parent_meta=pd.DataFrame()) # Serializing the columns and dtypes is much smaller than serializing # the empty frame cols = list(meta.columns) dtypes = meta.dtypes.to_dict() dfs = self.map_partitions(to_dataframe, cols, dtypes) if optimize_graph: dsk = self.__dask_optimize__(dfs.dask, dfs.__dask_keys__()) else: dsk = dfs.dask divisions = [None] * (self.npartitions + 1) from dask.dataframe.dask_expr import from_graph return from_graph(dsk, meta, divisions, dfs.__dask_keys__(), "from-bag") def to_delayed(self, optimize_graph=True): """Convert into a list of ``dask.delayed`` objects, one per partition. Parameters ---------- optimize_graph : bool, optional If True [default], the graph is optimized before converting into ``dask.delayed`` objects. See Also -------- dask.bag.from_delayed """ from dask.delayed import Delayed keys = self.__dask_keys__() dsk = self.__dask_graph__() layer = self.name if optimize_graph: dsk = self.__dask_optimize__(dsk, keys) layer = f"delayed-{layer}" dsk = HighLevelGraph.from_collections(layer, dsk, dependencies=()) return [Delayed(k, dsk, layer=layer) for k in keys] def repartition(self, npartitions=None, partition_size=None): """Repartition Bag across new divisions. Parameters ---------- npartitions : int, optional Number of partitions of output. partition_size : int or string, optional Max number of bytes of memory for each partition. Use numbers or strings like 5MB. .. warning:: This keyword argument triggers computation to determine the memory size of each partition, which may be expensive. Notes ----- Exactly one of ``npartitions`` or ``partition_size`` should be specified. A ``ValueError`` will be raised when that is not the case. 
Examples -------- >>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP """ if sum([partition_size is not None, npartitions is not None]) != 1: raise ValueError( "Please provide exactly one ``npartitions`` or ``partition_size`` keyword arguments" ) if npartitions is not None: return repartition_npartitions(self, npartitions) elif partition_size is not None: return repartition_size(self, partition_size) def accumulate(self, binop, initial=no_default): """Repeatedly apply binary function to a sequence, accumulating results. This assumes that the bag is ordered. While this is typically the case not all Dask.bag functions preserve this property. Examples -------- >>> import dask.bag as db >>> from operator import add >>> b = db.from_sequence([1, 2, 3, 4, 5], npartitions=2) >>> b.accumulate(add).compute() [1, 3, 6, 10, 15] Accumulate also takes an optional argument that will be used as the first value. >>> b.accumulate(add, initial=-1).compute() [-1, 0, 2, 5, 9, 14] """ token = tokenize(self, binop, initial) binop_name = funcname(binop) a = f"{binop_name}-part-{token}" b = f"{binop_name}-first-{token}" c = f"{binop_name}-second-{token}" dsk = { (a, 0): (accumulate_part, binop, (self.name, 0), initial, True), (b, 0): (first, (a, 0)), (c, 0): (second, (a, 0)), } for i in range(1, self.npartitions): dsk[(a, i)] = (accumulate_part, binop, (self.name, i), (c, i - 1)) dsk[(b, i)] = (first, (a, i)) dsk[(c, i)] = (second, (a, i)) graph = HighLevelGraph.from_collections(b, dsk, dependencies=[self]) return Bag(graph, b, self.npartitions) def accumulate_part(binop, seq, initial, is_first=False): if initial is no_default: res = list(accumulate(binop, seq)) else: res = list(accumulate(binop, seq, initial=initial)) if is_first: return res, res[-1] if res else [], initial return res[1:], res[-1] def partition(grouper, sequence, npartitions, p, nelements=2**20): """Partition a bag along a grouper, store partitions on disk.""" for block in partition_all(nelements, sequence): d = groupby(grouper, block) d2 = defaultdict(list) for k, v in d.items(): d2[abs(int(tokenize(k), 16)) % npartitions].extend(v) p.append(d2, fsync=True) return p def collect(grouper, group, p, barrier_token): """Collect partitions from disk and yield k,v group pairs.""" d = groupby(grouper, p.get(group, lock=False)) return list(d.items()) def from_sequence(seq, partition_size=None, npartitions=None): """Create a dask Bag from Python sequence. This sequence should be relatively small in memory. Dask Bag works best when it handles loading your data itself. Commonly we load a sequence of filenames into a Bag and then use ``.map`` to open them. Parameters ---------- seq: Iterable A sequence of elements to put into the dask partition_size: int (optional) The length of each partition npartitions: int (optional) The number of desired partitions It is best to provide either ``partition_size`` or ``npartitions`` (though not both.) 
Examples -------- >>> import dask.bag as db >>> b = db.from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2) See Also -------- read_text: Create bag from text files """ seq = list(seq) if npartitions and not partition_size: if len(seq) <= 100: partition_size = int(math.ceil(len(seq) / npartitions)) else: partition_size = max(1, int(math.floor(len(seq) / npartitions))) if npartitions is None and partition_size is None: if len(seq) <= 100: partition_size = 1 else: partition_size = max(1, math.ceil(math.sqrt(len(seq)) / math.sqrt(100))) parts = list(partition_all(partition_size, seq)) name = "from_sequence-" + tokenize(seq, partition_size) if len(parts) > 0: d = {(name, i): list(part) for i, part in enumerate(parts)} else: d = {(name, 0): []} return Bag(d, name, len(d)) def from_url(urls): """Create a dask Bag from a url. Examples -------- >>> a = from_url('http://raw.githubusercontent.com/dask/dask/main/README.rst') >>> a.npartitions 1 >>> a.take(8) # doctest: +SKIP (b'Dask\\n', b'====\\n', b'\\n', b'|Build Status| |Coverage| |Doc Status| |Discourse| |Version Status| |NumFOCUS|\\n', b'\\n', b'Dask is a flexible parallel computing library for analytics. See\\n', b'documentation_ for more information.\\n', b'\\n') >>> b = from_url(['http://github.com', 'http://google.com']) >>> b.npartitions 2 """ if isinstance(urls, str): urls = [urls] name = f"from_url-{uuid.uuid4().hex}" dsk = {} for i, u in enumerate(urls): dsk[(name, i)] = (list, (urlopen, u)) return Bag(dsk, name, len(urls)) def dictitems(d): """A pickleable version of dict.items >>> dictitems({'x': 1}) [('x', 1)] """ return list(d.items()) def concat(bags): """Concatenate many bags together, unioning all elements. >>> import dask.bag as db >>> a = db.from_sequence([1, 2, 3]) >>> b = db.from_sequence([4, 5, 6]) >>> c = db.concat([a, b]) >>> list(c) [1, 2, 3, 4, 5, 6] """ name = "concat-" + tokenize(*bags) counter = itertools.count(0) dsk = {(name, next(counter)): key for bag in bags for key in bag.__dask_keys__()} graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags) return Bag(graph, name, len(dsk)) def reify(seq): if isinstance(seq, Iterator): seq = list(seq) try: first = next(iter(seq)) except StopIteration: return seq # empty iterator if isinstance(first, Iterator): seq = list(map(list, seq)) return seq def from_delayed(values): """Create bag from many dask Delayed objects. These objects will become the partitions of the resulting Bag. They should evaluate to a ``list`` or some other concrete sequence. Parameters ---------- values: list of delayed values An iterable of dask Delayed objects. Each evaluating to a list. Returns ------- Bag Examples -------- >>> x, y, z = [delayed(load_sequence_from_file)(fn) ... for fn in filenames] # doctest: +SKIP >>> b = from_delayed([x, y, z]) # doctest: +SKIP See also -------- dask.delayed """ from dask.delayed import Delayed, delayed if isinstance(values, Delayed): values = [values] futures = [v for v in values if isinstance(v, TaskRef)] if all_futures := (len(futures) == len(values)): # All futures. Fast path values = futures else: # Every Delayed generates a Layer, i.e. this path is much more expensive # if there are many input values. 
values = [ delayed(v) if not isinstance(v, (Delayed,)) and hasattr(v, "key") else v for v in values ] name = "bag-from-delayed-" + tokenize(*values) tasks = [Task((name, i), reify, TaskRef(v.key)) for i, v in enumerate(values)] dsk = {t.key: t for t in tasks} graph = HighLevelGraph.from_collections( name, dsk, dependencies=values if not all_futures else () ) return Bag(graph, name, len(values)) def chunk_distinct(seq, key=None): if key is not None and not callable(key): key = partial(chunk.getitem, key=key) return list(unique(seq, key=key)) def merge_distinct(seqs, key=None): return chunk_distinct(toolz.concat(seqs), key=key) def merge_frequencies(seqs): if isinstance(seqs, Iterable): seqs = list(seqs) if not seqs: return {} first, rest = seqs[0], seqs[1:] if not rest: return first out = defaultdict(int) out.update(first) for d in rest: for k, v in d.items(): out[k] += v return out def bag_range(n, npartitions): """Numbers from zero to n Examples -------- >>> import dask.bag as db >>> b = db.range(5, npartitions=2) >>> list(b) [0, 1, 2, 3, 4] """ size = n // npartitions name = f"range-{n}-npartitions-{npartitions}" ijs = list(enumerate(take(npartitions, range(0, n, size)))) dsk = {(name, i): (reify, (range, j, min(j + size, n))) for i, j in ijs} if n % npartitions != 0: i, j = ijs[-1] dsk[(name, i)] = (reify, (range, j, n)) return Bag(dsk, name, npartitions) def bag_zip(*bags): """Partition-wise bag zip All passed bags must have the same number of partitions. NOTE: corresponding partitions should have the same length; if they do not, the "extra" elements from the longer partition(s) will be dropped. If you have this case chances are that what you really need is a data alignment mechanism like pandas's, and not a missing value filler like zip_longest. Examples -------- Correct usage: >>> import dask.bag as db >>> evens = db.from_sequence(range(0, 10, 2), partition_size=4) >>> odds = db.from_sequence(range(1, 10, 2), partition_size=4) >>> pairs = db.zip(evens, odds) >>> list(pairs) [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)] Incorrect usage: >>> numbers = db.range(31, npartitions=1) >>> fizz = numbers.filter(lambda n: n % 3 == 0) >>> buzz = numbers.filter(lambda n: n % 5 == 0) >>> fizzbuzz = db.zip(fizz, buzz) >>> list(fizzbuzz) [(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)] When what you really wanted was more along the lines of the following: >>> list(fizzbuzz) # doctest: +SKIP (0, 0), (3, None), (None, 5), (6, None), (9, None), (None, 10), (12, None), (15, 15), (18, None), (None, 20), (21, None), (24, None), (None, 25), (27, None), (30, 30) """ npartitions = bags[0].npartitions assert all(bag.npartitions == npartitions for bag in bags) # TODO: do more checks name = "zip-" + tokenize(*bags) dsk = { (name, i): (reify, (zip,) + tuple((bag.name, i) for bag in bags)) for i in range(npartitions) } graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags) return Bag(graph, name, npartitions) def map_chunk(f, iters, iter_kwarg_keys=None, kwargs=None): """Map ``f`` across one or more iterables, maybe with keyword arguments. Low-level function used in ``bag_map``, not user facing. Arguments --------- f : callable iters : List[Iterable] iter_kwarg_keys : List[str] or None Keyword names to use for pair with the tail end of ``iters``, allowing keyword arguments to be passed in from iterators. kwargs : dict or None Additional constant keyword arguments to use on every call to ``f``. 
""" if kwargs: f = partial(f, **kwargs) iters = [iter(a) for a in iters] return _MapChunk(f, iters, kwarg_keys=iter_kwarg_keys)
Bag
python
plotly__plotly.py
plotly/graph_objs/scatterpolargl/marker/colorbar/_title.py
{ "start": 233, "end": 4070 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scatterpolargl.marker.colorbar" _path_str = "scatterpolargl.marker.colorbar.title" _valid_props = {"font", "side", "text"} @property def font(self): """ Sets this color bar's title font. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.scatterpolargl.marker.colorbar.title.Font` - A dict of string/value properties that will be passed to the Font constructor Returns ------- plotly.graph_objs.scatterpolargl.marker.colorbar.title.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val @property def side(self): """ Determines the location of color bar's title with respect to the color bar. Defaults to "top" when `orientation` if "v" and defaults to "right" when `orientation` if "h". The 'side' property is an enumeration that may be specified as: - One of the following enumeration values: ['right', 'top', 'bottom'] Returns ------- Any """ return self["side"] @side.setter def side(self, val): self["side"] = val @property def text(self): """ Sets the title of the color bar. The 'text' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["text"] @text.setter def text(self, val): self["text"] = val @property def _prop_descriptions(self): return """\ font Sets this color bar's title font. side Determines the location of color bar's title with respect to the color bar. Defaults to "top" when `orientation` if "v" and defaults to "right" when `orientation` if "h". text Sets the title of the color bar. """ def __init__(self, arg=None, font=None, side=None, text=None, **kwargs): """ Construct a new Title object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatterpolargl .marker.colorbar.Title` font Sets this color bar's title font. side Determines the location of color bar's title with respect to the color bar. Defaults to "top" when `orientation` if "v" and defaults to "right" when `orientation` if "h". text Sets the title of the color bar. Returns ------- Title """ super().__init__("title") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scatterpolargl.marker.colorbar.Title constructor must be a dict or an instance of :class:`plotly.graph_objs.scatterpolargl.marker.colorbar.Title`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("font", arg, font) self._set_property("side", arg, side) self._set_property("text", arg, text) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Title
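A hedged sketch of where this Title object sits in the plotly figure tree, set here via nested dicts on a Scatterpolargl trace; the data values are illustrative only.

import plotly.graph_objects as go

fig = go.Figure(
    go.Scatterpolargl(
        r=[1, 2, 3],
        theta=[0, 45, 90],
        mode="markers",
        marker=dict(
            color=[1, 2, 3],
            showscale=True,
            # The colorbar title corresponds to the font/side/text properties above.
            colorbar=dict(title=dict(text="Intensity", side="right")),
        ),
    )
)
fig.show()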
python
matplotlib__matplotlib
lib/matplotlib/legend_handler.py
{ "start": 18613, "end": 18908 }
class ____(HandlerRegularPolyCollection): r"""Handler for `.CircleCollection`\s.""" def create_collection(self, orig_handle, sizes, offsets, offset_transform): return type(orig_handle)( sizes, offsets=offsets, offset_transform=offset_transform)
HandlerCircleCollection
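A hedged sketch of this handler resolving a legend entry for a CircleCollection; matplotlib already maps CircleCollection to this handler by default, so the explicit handler_map below only makes the pairing visible, and the collection construction details are assumptions.

import matplotlib.pyplot as plt
from matplotlib.collections import CircleCollection
from matplotlib.legend_handler import HandlerCircleCollection

fig, ax = plt.subplots()
# sizes are in points^2; offsets place one circle in data coordinates.
cc = CircleCollection(sizes=[60], offsets=[(0.5, 0.5)], offset_transform=ax.transData)
cc.set_label("stations")
ax.add_collection(cc)
ax.legend(handler_map={CircleCollection: HandlerCircleCollection()})
plt.show()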
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_merge_range04.py
{ "start": 315, "end": 905 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("merge_range04.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() cell_format = workbook.add_format({"align": "center", "bold": 1}) worksheet.merge_range(1, 1, 1, 3, "Foo", cell_format) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
apache__airflow
kubernetes-tests/tests/kubernetes_tests/test_kubernetes_pod_operator.py
{ "start": 3968, "end": 58813 }
class ____: @pytest.fixture(autouse=True) def setup_tests(self, test_label): self.api_client = ApiClient() self.labels = {"test_label": test_label} self.expected_pod = { "apiVersion": "v1", "kind": "Pod", "metadata": { "namespace": "default", "name": ANY, "annotations": {}, "labels": { "test_label": test_label, "kubernetes_pod_operator": "True", "airflow_version": airflow_version.replace("+", "-"), "airflow_kpo_in_cluster": "False", "run_id": "manual__2016-01-01T0100000100-da4d1ce7b", "dag_id": "dag", "task_id": ANY, "try_number": "1", }, }, "spec": { "affinity": {}, "containers": [ { "image": "ubuntu:16.04", "args": ["echo 10"], "command": ["bash", "-cx"], "env": [], "envFrom": [], "name": "base", "ports": [], "terminationMessagePolicy": "File", "volumeMounts": [], } ], "hostNetwork": False, "imagePullSecrets": [], "initContainers": [], "nodeSelector": {}, "restartPolicy": "Never", "securityContext": {}, "tolerations": [], "volumes": [], }, } yield hook = KubernetesHook(conn_id=None, in_cluster=False) client = hook.core_v1_client client.delete_collection_namespaced_pod(namespace="default", grace_period_seconds=0) @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): """Create kubernetes_default connection""" connection = Connection( conn_id="kubernetes_default", conn_type="kubernetes", ) create_connection_without_db(connection) def _get_labels_selector(self) -> str | None: if not self.labels: return None return ",".join([f"{key}={value}" for key, value in enumerate(self.labels)]) def test_do_xcom_push_defaults_false(self, kubeconfig_path, tmp_path): new_config_path = tmp_path / "kube_config.cfg" shutil.copy(kubeconfig_path, new_config_path) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, config_file=os.fspath(new_config_path), ) assert not k.do_xcom_push def test_config_path_move(self, kubeconfig_path, tmp_path): new_config_path = tmp_path / "kube_config.cfg" shutil.copy(kubeconfig_path, new_config_path) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, on_finish_action=OnFinishAction.KEEP_POD, config_file=os.fspath(new_config_path), ) context = create_context(k) k.execute(context) expected_pod = copy(self.expected_pod) actual_pod = self.api_client.sanitize_for_serialization(k.pod) assert actual_pod == expected_pod def test_working_pod(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) assert self.expected_pod["spec"] == actual_pod["spec"] assert self.expected_pod["metadata"]["labels"] == actual_pod["metadata"]["labels"] def test_skip_cleanup(self): k = KubernetesPodOperator( namespace="unknown", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, ) context = create_context(k) with pytest.raises(ApiException): k.execute(context) def test_delete_operator_pod(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, 
task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, on_finish_action=OnFinishAction.DELETE_POD, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) assert self.expected_pod["spec"] == actual_pod["spec"] assert self.expected_pod["metadata"]["labels"] == actual_pod["metadata"]["labels"] def test_skip_on_specified_exit_code(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["exit 42"], task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, on_finish_action=OnFinishAction.DELETE_POD, skip_on_exit_code=42, ) context = create_context(k) with pytest.raises(AirflowSkipException): k.execute(context) def test_already_checked_on_success(self): """ When ``on_finish_action="keep_pod"``, pod should have 'already_checked' label, whether pod is successful or not. """ k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, on_finish_action=OnFinishAction.KEEP_POD, ) context = create_context(k) k.execute(context) actual_pod = k.find_pod("default", context, exclude_checked=False) actual_pod = self.api_client.sanitize_for_serialization(actual_pod) assert actual_pod["metadata"]["labels"]["already_checked"] == "True" def test_already_checked_on_failure(self): """ When ``on_finish_action="keep_pod"``, pod should have 'already_checked' label, whether pod is successful or not. """ k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["lalala"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, on_finish_action=OnFinishAction.KEEP_POD, ) context = create_context(k) with pytest.raises(AirflowException): k.execute(context) actual_pod = k.find_pod("default", context, exclude_checked=False) actual_pod = self.api_client.sanitize_for_serialization(actual_pod) status = next(x for x in actual_pod["status"]["containerStatuses"] if x["name"] == "base") assert status["state"]["terminated"]["reason"] == "Error" assert actual_pod["metadata"]["labels"]["already_checked"] == "True" def test_pod_hostnetwork(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, hostnetwork=True, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["hostNetwork"] = True assert self.expected_pod["spec"] == actual_pod["spec"] assert self.expected_pod["metadata"]["labels"] == actual_pod["metadata"]["labels"] def test_pod_dnspolicy(self): dns_policy = "ClusterFirstWithHostNet" k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, hostnetwork=True, dnspolicy=dns_policy, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["hostNetwork"] = True self.expected_pod["spec"]["dnsPolicy"] = dns_policy assert self.expected_pod["spec"] == actual_pod["spec"] assert self.expected_pod["metadata"]["labels"] == actual_pod["metadata"]["labels"] def test_pod_schedulername(self): scheduler_name = "default-scheduler" k = KubernetesPodOperator( namespace="default", 
image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, schedulername=scheduler_name, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["schedulerName"] = scheduler_name assert self.expected_pod == actual_pod def test_pod_node_selector(self): node_selector = {"beta.kubernetes.io/os": "linux"} k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, node_selector=node_selector, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["nodeSelector"] = node_selector assert self.expected_pod == actual_pod def test_pod_resources(self): resources = k8s.V1ResourceRequirements( requests={"memory": "64Mi", "cpu": "250m", "ephemeral-storage": "1Gi"}, limits={"memory": "64Mi", "cpu": 0.25, "nvidia.com/gpu": None, "ephemeral-storage": "2Gi"}, ) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, container_resources=resources, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["resources"] = { "requests": {"memory": "64Mi", "cpu": "250m", "ephemeral-storage": "1Gi"}, "limits": {"memory": "64Mi", "cpu": 0.25, "nvidia.com/gpu": None, "ephemeral-storage": "2Gi"}, } assert self.expected_pod == actual_pod @pytest.mark.parametrize( "val", [ pytest.param( k8s.V1Affinity( node_affinity=k8s.V1NodeAffinity( required_during_scheduling_ignored_during_execution=k8s.V1NodeSelector( node_selector_terms=[ k8s.V1NodeSelectorTerm( match_expressions=[ k8s.V1NodeSelectorRequirement( key="beta.kubernetes.io/os", operator="In", values=["linux"], ) ] ) ] ) ) ), id="current", ), pytest.param( { "nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [ { "matchExpressions": [ { "key": "beta.kubernetes.io/os", "operator": "In", "values": ["linux"], } ] } ] } } }, id="backcompat", ), ], ) def test_pod_affinity(self, val): expected = { "nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [ { "matchExpressions": [ {"key": "beta.kubernetes.io/os", "operator": "In", "values": ["linux"]} ] } ] } } } k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, affinity=val, ) context = create_context(k) k.execute(context=context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["affinity"] = expected assert self.expected_pod == actual_pod def test_port(self): port = k8s.V1ContainerPort( name="http", container_port=80, ) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, ports=[port], ) context = create_context(k) k.execute(context=context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["ports"] = [{"name": "http", "containerPort": 80}] assert self.expected_pod 
== actual_pod def test_volume_mount(self): with mock.patch.object(PodManager, "log") as mock_logger: volume_mount = k8s.V1VolumeMount( name="test-volume", mount_path="/tmp/test_volume", sub_path=None, read_only=False ) volume = k8s.V1Volume( name="test-volume", persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name="test-volume"), ) args = [ 'echo "retrieved from mount" > /tmp/test_volume/test.txt && cat /tmp/test_volume/test.txt' ] k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=args, labels=self.labels, volume_mounts=[volume_mount], volumes=[volume], task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, container_name_log_prefix_enabled=False, ) context = create_context(k) k.execute(context=context) mock_logger.info.assert_any_call("%s", StringContainingId("retrieved from mount")) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["args"] = args self.expected_pod["spec"]["containers"][0]["volumeMounts"] = [ {"name": "test-volume", "mountPath": "/tmp/test_volume", "readOnly": False} ] self.expected_pod["spec"]["volumes"] = [ {"name": "test-volume", "persistentVolumeClaim": {"claimName": "test-volume"}} ] assert self.expected_pod == actual_pod @pytest.mark.parametrize("uid", [0, 1000]) def test_run_as_user(self, uid): security_context = V1PodSecurityContext(run_as_user=uid) name = str(uuid4()) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], task_id=name, name=name, random_name_suffix=False, on_finish_action=OnFinishAction.KEEP_POD, in_cluster=False, do_xcom_push=False, security_context=security_context, ) context = create_context(k) k.execute(context) pod = k.hook.core_v1_client.read_namespaced_pod( name=name, namespace="default", ) assert pod.to_dict()["spec"]["security_context"]["run_as_user"] == uid @pytest.mark.parametrize("gid", [0, 1000]) def test_fs_group(self, gid): security_context = V1PodSecurityContext(fs_group=gid) name = str(uuid4()) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], task_id=name, name=name, random_name_suffix=False, on_finish_action=OnFinishAction.KEEP_POD, in_cluster=False, do_xcom_push=False, security_context=security_context, ) context = create_context(k) k.execute(context) pod = k.hook.core_v1_client.read_namespaced_pod( name=name, namespace="default", ) assert pod.to_dict()["spec"]["security_context"]["fs_group"] == gid def test_disable_privilege_escalation(self): container_security_context = V1SecurityContext(allow_privilege_escalation=False) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, container_security_context=container_security_context, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["securityContext"] = { "allowPrivilegeEscalation": container_security_context.allow_privilege_escalation } assert self.expected_pod == actual_pod def test_faulty_image(self): bad_image_name = "foobar" k = KubernetesPodOperator( namespace="default", image=bad_image_name, cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, startup_timeout_seconds=5, ) context = create_context(k) with 
pytest.raises(AirflowException, match="Pod .* returned a failure"): k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["image"] = bad_image_name assert self.expected_pod == actual_pod def test_faulty_service_account(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, startup_timeout_seconds=5, service_account_name="foobar", ) context = create_context(k) pod = k.build_pod_request_obj(context) with pytest.raises(ApiException, match="error looking up service account default/foobar"): k.get_or_create_pod(pod, context) def test_pod_failure(self): """ Tests that the task fails when a pod reports a failure """ bad_internal_command = ["foobar 10 "] k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=bad_internal_command, labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, ) context = create_context(k) with pytest.raises(AirflowException, match="Pod .* returned a failure"): k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["args"] = bad_internal_command assert self.expected_pod == actual_pod def test_xcom_push(self, test_label): expected = {"test_label": test_label, "buzz": 2} args = [f"echo '{json.dumps(expected)}' > /airflow/xcom/return.json"] k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=args, labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=True, ) context = create_context(k) assert k.execute(context) == expected def test_env_vars(self): # WHEN env_vars = [ k8s.V1EnvVar(name="ENV1", value="val1"), k8s.V1EnvVar(name="ENV2", value="val2"), k8s.V1EnvVar( name="ENV3", value_from=k8s.V1EnvVarSource(field_ref=k8s.V1ObjectFieldSelector(field_path="status.podIP")), ), ] k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], env_vars=env_vars, labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, ) # THEN context = create_context(k) actual_pod = self.api_client.sanitize_for_serialization(k.build_pod_request_obj(context)) self.expected_pod["spec"]["containers"][0]["env"] = [ {"name": "ENV1", "value": "val1"}, {"name": "ENV2", "value": "val2"}, {"name": "ENV3", "valueFrom": {"fieldRef": {"fieldPath": "status.podIP"}}}, ] assert self.expected_pod == actual_pod def test_pod_template_file_system(self, basic_pod_template): """Note: this test requires that you have a namespace ``mem-example`` in your cluster.""" k = KubernetesPodOperator( task_id=str(uuid4()), in_cluster=False, labels=self.labels, pod_template_file=basic_pod_template.as_posix(), do_xcom_push=True, ) context = create_context(k) result = k.execute(context) assert result is not None assert result == {"hello": "world"} @pytest.mark.parametrize( "env_vars", [ pytest.param([k8s.V1EnvVar(name="env_name", value="value")], id="current"), pytest.param({"env_name": "value"}, id="backcompat"), # todo: remove? 
], ) def test_pod_template_file_with_overrides_system(self, env_vars, test_label, basic_pod_template): k = KubernetesPodOperator( task_id=str(uuid4()), labels=self.labels, env_vars=env_vars, in_cluster=False, pod_template_file=basic_pod_template.as_posix(), do_xcom_push=True, ) context = create_context(k) result = k.execute(context) assert result is not None assert k.pod.metadata.labels == { "test_label": test_label, "airflow_version": mock.ANY, "airflow_kpo_in_cluster": "False", "dag_id": "dag", "run_id": "manual__2016-01-01T0100000100-da4d1ce7b", "kubernetes_pod_operator": "True", "task_id": mock.ANY, "try_number": "1", } assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")] assert result == {"hello": "world"} def test_pod_template_file_with_full_pod_spec(self, test_label, basic_pod_template): pod_spec = k8s.V1Pod( metadata=k8s.V1ObjectMeta( labels={"test_label": test_label, "fizz": "buzz"}, ), spec=k8s.V1PodSpec( containers=[ k8s.V1Container( name="base", env=[k8s.V1EnvVar(name="env_name", value="value")], ) ] ), ) k = KubernetesPodOperator( task_id=str(uuid4()), labels=self.labels, in_cluster=False, pod_template_file=basic_pod_template.as_posix(), full_pod_spec=pod_spec, do_xcom_push=True, ) context = create_context(k) result = k.execute(context) assert result is not None assert k.pod.metadata.labels == { "fizz": "buzz", "test_label": test_label, "airflow_version": mock.ANY, "airflow_kpo_in_cluster": "False", "dag_id": "dag", "run_id": "manual__2016-01-01T0100000100-da4d1ce7b", "kubernetes_pod_operator": "True", "task_id": mock.ANY, "try_number": "1", } assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")] assert result == {"hello": "world"} def test_full_pod_spec(self, test_label): pod_spec = k8s.V1Pod( metadata=k8s.V1ObjectMeta( labels={"test_label": test_label, "fizz": "buzz"}, namespace="default", name="test-pod" ), spec=k8s.V1PodSpec( containers=[ k8s.V1Container( name="base", image="perl", command=["/bin/bash"], args=["-c", 'echo {\\"hello\\" : \\"world\\"} | cat > /airflow/xcom/return.json'], env=[k8s.V1EnvVar(name="env_name", value="value")], ) ], restart_policy="Never", ), ) k = KubernetesPodOperator( task_id=str(uuid4()), in_cluster=False, labels=self.labels, full_pod_spec=pod_spec, do_xcom_push=True, on_finish_action=OnFinishAction.KEEP_POD, startup_timeout_seconds=30, ) context = create_context(k) result = k.execute(context) assert result is not None assert k.pod.metadata.labels == { "fizz": "buzz", "test_label": test_label, "airflow_version": mock.ANY, "airflow_kpo_in_cluster": "False", "dag_id": "dag", "run_id": "manual__2016-01-01T0100000100-da4d1ce7b", "kubernetes_pod_operator": "True", "task_id": mock.ANY, "try_number": "1", } assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")] assert result == {"hello": "world"} def test_init_container(self): # GIVEN volume_mounts = [ k8s.V1VolumeMount(mount_path="/etc/foo", name="test-volume", sub_path=None, read_only=True) ] init_environments = [ k8s.V1EnvVar(name="key1", value="value1"), k8s.V1EnvVar(name="key2", value="value2"), ] init_container = k8s.V1Container( name="init-container", image="ubuntu:16.04", env=init_environments, volume_mounts=volume_mounts, command=["bash", "-cx"], args=["echo 10"], ) volume = k8s.V1Volume( name="test-volume", persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name="test-volume"), ) expected_init_container = { "name": "init-container", "image": "ubuntu:16.04", "command": ["bash", 
"-cx"], "args": ["echo 10"], "env": [{"name": "key1", "value": "value1"}, {"name": "key2", "value": "value2"}], "volumeMounts": [{"mountPath": "/etc/foo", "name": "test-volume", "readOnly": True}], } k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), volumes=[volume], init_containers=[init_container], in_cluster=False, do_xcom_push=False, ) context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["initContainers"] = [expected_init_container] self.expected_pod["spec"]["volumes"] = [ {"name": "test-volume", "persistentVolumeClaim": {"claimName": "test-volume"}} ] assert self.expected_pod == actual_pod @mock.patch(f"{POD_MANAGER_CLASS}.await_xcom_sidecar_container_start") @mock.patch(f"{POD_MANAGER_CLASS}.extract_xcom") @mock.patch(f"{POD_MANAGER_CLASS}.await_pod_completion") @mock.patch(f"{POD_MANAGER_CLASS}.watch_pod_events", new=AsyncMock()) @mock.patch(f"{POD_MANAGER_CLASS}.await_pod_start", new=AsyncMock()) @mock.patch(f"{POD_MANAGER_CLASS}.create_pod", new=MagicMock) @mock.patch(HOOK_CLASS) def test_pod_template_file( self, hook_mock, await_pod_completion_mock, extract_xcom_mock, await_xcom_sidecar_container_start_mock, caplog, test_label, pod_template, ): # todo: This isn't really a system test await_xcom_sidecar_container_start_mock.return_value = None hook_mock.return_value.is_in_cluster = False hook_mock.return_value.get_xcom_sidecar_container_image.return_value = None hook_mock.return_value.get_xcom_sidecar_container_resources.return_value = None hook_mock.return_value.get_connection.return_value = Connection(conn_id="kubernetes_default") extract_xcom_mock.return_value = "{}" k = KubernetesPodOperator( task_id=str(uuid4()), labels=self.labels, random_name_suffix=False, pod_template_file=pod_template.as_posix(), do_xcom_push=True, ) pod_mock = MagicMock() pod_mock.status.phase = "Succeeded" await_pod_completion_mock.return_value = pod_mock context = create_context(k) # TODO: once Airflow 3.1 is the min version, replace this with out structlog-based caplog fixture with mock.patch.object(k.log, "debug") as debug_logs: k.execute(context) expected_lines = "\n".join( [ "api_version: v1", "kind: Pod", "metadata:", " annotations: {}", " creation_timestamp: null", " deletion_grace_period_seconds: null", ] ) # Make a nice assert if it's not there debug_logs.assert_any_call("Starting pod:\n%s", mock.ANY) # Now we know it is there, examine the second argument mock_call = next(call for call in debug_logs.mock_calls if call[1][0] == "Starting pod:\n%s") assert mock_call[1][1][: len(expected_lines)] == expected_lines actual_pod = self.api_client.sanitize_for_serialization(k.pod) expected_dict = { "apiVersion": "v1", "kind": "Pod", "metadata": { "annotations": {}, "labels": { "test_label": test_label, "airflow_kpo_in_cluster": "False", "dag_id": "dag", "run_id": "manual__2016-01-01T0100000100-da4d1ce7b", "kubernetes_pod_operator": "True", "task_id": mock.ANY, "try_number": "1", }, "name": "memory-demo", "namespace": "mem-example", }, "spec": { "affinity": {}, "containers": [ { "args": ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"], "command": ["stress"], "env": [], "envFrom": [], "image": "ghcr.io/apache/airflow-stress:1.0.4-2021.07.04", "name": "base", "ports": [], "resources": {"limits": {"memory": "200Mi"}, "requests": {"memory": "100Mi"}}, "terminationMessagePolicy": "File", "volumeMounts": [{"mountPath": 
"/airflow/xcom", "name": "xcom"}], }, { "command": ["sh", "-c", 'trap "exit 0" INT; while true; do sleep 1; done;'], "image": "alpine", "name": "airflow-xcom-sidecar", "resources": { "requests": {"cpu": "1m", "memory": "10Mi"}, }, "volumeMounts": [{"mountPath": "/airflow/xcom", "name": "xcom"}], }, ], "hostNetwork": False, "imagePullSecrets": [], "initContainers": [], "nodeSelector": {}, "restartPolicy": "Never", "securityContext": {}, "tolerations": [], "volumes": [{"emptyDir": {}, "name": "xcom"}], }, } version = actual_pod["metadata"]["labels"]["airflow_version"] assert version.startswith(airflow_version) del actual_pod["metadata"]["labels"]["airflow_version"] assert expected_dict == actual_pod @mock.patch(f"{POD_MANAGER_CLASS}.await_pod_completion") @mock.patch(f"{POD_MANAGER_CLASS}.watch_pod_events", new=AsyncMock()) @mock.patch(f"{POD_MANAGER_CLASS}.await_pod_start", new=AsyncMock()) @mock.patch(f"{POD_MANAGER_CLASS}.create_pod", new=MagicMock) @mock.patch(HOOK_CLASS) def test_pod_priority_class_name(self, hook_mock, await_pod_completion_mock): """ Test ability to assign priorityClassName to pod todo: This isn't really a system test """ hook_mock.return_value.is_in_cluster = False hook_mock.return_value.get_connection.return_value = Connection(conn_id="kubernetes_default") priority_class_name = "medium-test" k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, priority_class_name=priority_class_name, ) pod_mock = MagicMock() pod_mock.status.phase = "Succeeded" await_pod_completion_mock.return_value = pod_mock context = create_context(k) k.execute(context) actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["priorityClassName"] = priority_class_name assert self.expected_pod == actual_pod def test_pod_name(self): pod_name_too_long = "a" * 221 k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, name=pod_name_too_long, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, ) # Name is now in template fields, and it's final value requires context # so we need to execute for name validation context = create_context(k) with pytest.raises(AirflowException): k.execute(context) def test_on_kill(self): hook = KubernetesHook(conn_id=None, in_cluster=False) client = hook.core_v1_client name = "test" namespace = "default" k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["sleep 1000"], labels=self.labels, name=name, task_id=name, in_cluster=False, do_xcom_push=False, get_logs=False, termination_grace_period=0, ) context = create_context(k) class ShortCircuitException(Exception): pass # use this mock to short circuit and NOT wait for container completion with mock.patch.object( k.pod_manager, "await_container_completion", side_effect=ShortCircuitException() ): # cleanup will be upset since the pod should not be completed.. 
so skip it with mock.patch.object(k, "cleanup"): with pytest.raises(ShortCircuitException): k.execute(context) # when we get here, the pod should still be running name = k.pod.metadata.name pod = client.read_namespaced_pod(name=name, namespace=namespace) assert pod.status.phase == "Running" k.on_kill() with pytest.raises(ApiException, match=r'pods \\"test.[a-z0-9]+\\" not found'): client.read_namespaced_pod(name=name, namespace=namespace) def test_reattach_failing_pod_once(self): hook = KubernetesHook(conn_id=None, in_cluster=False) client = hook.core_v1_client name = "test" namespace = "default" def get_op(): return KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["exit 1"], labels=self.labels, name="test", task_id=name, in_cluster=False, do_xcom_push=False, on_finish_action=OnFinishAction.KEEP_POD, termination_grace_period=0, ) k = get_op() context = create_context(k) # launch pod with mock.patch(f"{POD_MANAGER_CLASS}.await_pod_completion") as await_pod_completion_mock: pod_mock = MagicMock() pod_mock.status.phase = "Succeeded" await_pod_completion_mock.return_value = pod_mock # we want to simulate that there was a worker failure and the airflow operator process # was killed without running the cleanup process. in this case the pod will not be marked as # already checked k.cleanup = MagicMock() k.execute(context) name = k.pod.metadata.name pod = client.read_namespaced_pod(name=name, namespace=namespace) while pod.status.phase != "Failed": pod = client.read_namespaced_pod(name=name, namespace=namespace) assert "already_checked" not in pod.metadata.labels # create a new version of the same operator instance to remove the monkey patching in first # part of the test k = get_op() # `create_pod` should not be called because there's a pod there it should find # should use the found pod and patch as "already_checked" (in failure block) with mock.patch(f"{POD_MANAGER_CLASS}.create_pod") as create_mock: with pytest.raises(AirflowException): k.execute(context) pod = client.read_namespaced_pod(name=name, namespace=namespace) assert pod.metadata.labels["already_checked"] == "True" create_mock.assert_not_called() # recreate op just to ensure we're not relying on any statefulness k = get_op() # `create_pod` should be called because though there's still a pod to be found, # it will be `already_checked` with mock.patch(f"{POD_MANAGER_CLASS}.create_pod") as create_mock: with pytest.raises(ApiException, match=r'pods \\"test.[a-z0-9]+\\" not found'): k.execute(context) create_mock.assert_called_once() def test_changing_base_container_name_with_get_logs(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, get_logs=True, base_container_name="apple-sauce", ) assert k.base_container_name == "apple-sauce" context = create_context(k) with mock.patch.object( k.pod_manager, "fetch_container_logs", wraps=k.pod_manager.fetch_container_logs ) as mock_fetch_container_logs: k.execute(context) assert mock_fetch_container_logs.call_args[1]["container_name"] == "apple-sauce" actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["name"] = "apple-sauce" assert self.expected_pod["spec"] == actual_pod["spec"] def test_changing_base_container_name_no_logs(self): """ This test checks BOTH a modified base container name AND the get_logs=False flow, and as a result, also checks that 
the flow works with fast containers See https://github.com/apache/airflow/issues/26796 """ k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, get_logs=False, base_container_name="apple-sauce", ) assert k.base_container_name == "apple-sauce" context = create_context(k) with mock.patch.object( k.pod_manager, "await_container_completion", wraps=k.pod_manager.await_container_completion ) as mock_await_container_completion: k.execute(context) assert mock_await_container_completion.call_args[1]["container_name"] == "apple-sauce" actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["name"] = "apple-sauce" assert self.expected_pod["spec"] == actual_pod["spec"] def test_changing_base_container_name_no_logs_long(self): """ Similar to test_changing_base_container_name_no_logs, but ensures that pods running longer than 1 second work too. See https://github.com/apache/airflow/issues/26796 """ k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["sleep 3"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, get_logs=False, base_container_name="apple-sauce", ) assert k.base_container_name == "apple-sauce" context = create_context(k) with mock.patch.object( k.pod_manager, "await_container_completion", wraps=k.pod_manager.await_container_completion ) as mock_await_container_completion: k.execute(context) assert mock_await_container_completion.call_args[1]["container_name"] == "apple-sauce" actual_pod = self.api_client.sanitize_for_serialization(k.pod) self.expected_pod["spec"]["containers"][0]["name"] = "apple-sauce" self.expected_pod["spec"]["containers"][0]["args"] = ["sleep 3"] assert self.expected_pod["spec"] == actual_pod["spec"] def test_changing_base_container_name_failure(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["exit"], arguments=["1"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, base_container_name="apple-sauce", ) assert k.base_container_name == "apple-sauce" context = create_context(k) class ShortCircuitException(Exception): pass with mock.patch( "airflow.providers.cncf.kubernetes.operators.pod.get_container_termination_message", side_effect=ShortCircuitException(), ) as mock_get_container_termination_message: with pytest.raises(ShortCircuitException): k.execute(context) assert mock_get_container_termination_message.call_args[0][1] == "apple-sauce" def test_base_container_name_init_precedence(self): assert ( KubernetesPodOperator(base_container_name="apple-sauce", task_id=str(uuid4())).base_container_name == "apple-sauce" ) assert ( KubernetesPodOperator(task_id=str(uuid4())).base_container_name == KubernetesPodOperator.BASE_CONTAINER_NAME ) class MyK8SPodOperator(KubernetesPodOperator): BASE_CONTAINER_NAME = "tomato-sauce" assert ( MyK8SPodOperator(base_container_name="apple-sauce", task_id=str(uuid4())).base_container_name == "apple-sauce" ) assert MyK8SPodOperator(task_id=str(uuid4())).base_container_name == "tomato-sauce" def test_init_container_logs(self): marker_from_init_container = f"{uuid4()}" marker_from_main_container = f"{uuid4()}" callback = MagicMock() init_container = k8s.V1Container( name="init-container", image="busybox", command=["sh", "-cx"], args=[f"echo {marker_from_init_container}"], ) k = KubernetesPodOperator( 
namespace="default", image="busybox", cmds=["sh", "-cx"], arguments=[f"echo {marker_from_main_container}"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, startup_timeout_seconds=60, init_containers=[init_container], init_container_logs=True, callbacks=callback, ) context = create_context(k) k.execute(context) calls_args = "\n".join(["".join(c.kwargs["line"]) for c in callback.progress_callback.call_args_list]) assert marker_from_init_container in calls_args assert marker_from_main_container in calls_args def test_init_container_logs_filtered(self): marker_from_init_container_to_log_1 = f"{uuid4()}" marker_from_init_container_to_log_2 = f"{uuid4()}" marker_from_init_container_to_ignore = f"{uuid4()}" marker_from_main_container = f"{uuid4()}" callback = MagicMock() init_container_to_log_1 = k8s.V1Container( name="init-container-to-log-1", image="busybox", command=["sh", "-cx"], args=[f"echo {marker_from_init_container_to_log_1}"], ) init_container_to_log_2 = k8s.V1Container( name="init-container-to-log-2", image="busybox", command=["sh", "-cx"], args=[f"echo {marker_from_init_container_to_log_2}"], ) init_container_to_ignore = k8s.V1Container( name="init-container-to-ignore", image="busybox", command=["sh", "-cx"], args=[f"echo {marker_from_init_container_to_ignore}"], ) k = KubernetesPodOperator( namespace="default", image="busybox", cmds=["sh", "-cx"], arguments=[f"echo {marker_from_main_container}"], labels=self.labels, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, startup_timeout_seconds=60, init_containers=[ init_container_to_log_1, init_container_to_log_2, init_container_to_ignore, ], init_container_logs=[ # not same order as defined in init_containers "init-container-to-log-2", "init-container-to-log-1", ], callbacks=callback, ) context = create_context(k) k.execute(context) calls_args = "\n".join(["".join(c.kwargs["line"]) for c in callback.progress_callback.call_args_list]) assert marker_from_init_container_to_log_1 in calls_args assert marker_from_init_container_to_log_2 in calls_args assert marker_from_init_container_to_ignore not in calls_args assert marker_from_main_container in calls_args assert ( calls_args.find(marker_from_init_container_to_log_1) < calls_args.find(marker_from_init_container_to_log_2) < calls_args.find(marker_from_main_container) ) @pytest.mark.parametrize( ("log_prefix_enabled", "log_formatter", "expected_log_message_check"), [ pytest.param( True, None, lambda marker, record_message: f"[base] {marker}" in record_message, id="log_prefix_enabled", ), pytest.param( False, None, lambda marker, record_message: marker in record_message and "[base]" not in record_message, id="log_prefix_disabled", ), pytest.param( False, # Ignored when log_formatter is provided lambda container_name, message: f"CUSTOM[{container_name}]: {message}", lambda marker, record_message: f"CUSTOM[base]: {marker}" in record_message, id="custom_log_formatter", ), ], ) def test_log_output_configurations(self, log_prefix_enabled, log_formatter, expected_log_message_check): """ Tests various log output configurations (container_name_log_prefix_enabled, log_formatter) for KubernetesPodOperator. 
""" marker = f"test_log_{uuid4()}" k = KubernetesPodOperator( namespace="default", image="busybox", cmds=["sh", "-cx"], arguments=[f"echo {marker}"], labels={"test_label": "test"}, task_id=str(uuid4()), in_cluster=False, do_xcom_push=False, get_logs=True, container_name_log_prefix_enabled=log_prefix_enabled, log_formatter=log_formatter, ) # Test the _log_message method directly with mock.patch.object(k.pod_manager.log, "info") as mock_info: k.pod_manager._log_message( message=marker, container_name="base", container_name_log_prefix_enabled=log_prefix_enabled, log_formatter=log_formatter, ) # Check that the message was logged with the expected format mock_info.assert_called_once() logged_message = mock_info.call_args[0][1] # Second argument is the message assert expected_log_message_check(marker, logged_message) # TODO: Task SDK: https://github.com/apache/airflow/issues/45438 @pytest.mark.skip(reason="AIP-72: Secret Masking yet to be implemented") def test_hide_sensitive_field_in_templated_fields_on_error(caplog, monkeypatch): logger = logging.getLogger("airflow.task") monkeypatch.setattr(logger, "propagate", True) class Var: def __getattr__(self, name): raise KeyError(name) context = { "password": "secretpassword", "var": Var(), } from airflow.providers.cncf.kubernetes.operators.pod import ( KubernetesPodOperator, ) task = KubernetesPodOperator( task_id="dry_run_demo", name="hello-dry-run", image="python:3.10-slim-buster", cmds=["printenv"], env_vars=[ V1EnvVar(name="password", value="{{ password }}"), V1EnvVar(name="VAR2", value="{{ var.value.nonexisting}}"), ], ) with pytest.raises(KeyError): task.render_template_fields(context=context) assert "password" in caplog.text assert "secretpassword" not in caplog.text
TestKubernetesPodOperatorSystem
python
pallets__click
src/click/types.py
{ "start": 16469, "end": 19122 }
class ____(_NumberParamTypeBase):
    def __init__(
        self,
        min: float | None = None,
        max: float | None = None,
        min_open: bool = False,
        max_open: bool = False,
        clamp: bool = False,
    ) -> None:
        self.min = min
        self.max = max
        self.min_open = min_open
        self.max_open = max_open
        self.clamp = clamp

    def to_info_dict(self) -> dict[str, t.Any]:
        info_dict = super().to_info_dict()
        info_dict.update(
            min=self.min,
            max=self.max,
            min_open=self.min_open,
            max_open=self.max_open,
            clamp=self.clamp,
        )
        return info_dict

    def convert(
        self, value: t.Any, param: Parameter | None, ctx: Context | None
    ) -> t.Any:
        import operator

        rv = super().convert(value, param, ctx)
        lt_min: bool = self.min is not None and (
            operator.le if self.min_open else operator.lt
        )(rv, self.min)
        gt_max: bool = self.max is not None and (
            operator.ge if self.max_open else operator.gt
        )(rv, self.max)

        if self.clamp:
            if lt_min:
                return self._clamp(self.min, 1, self.min_open)  # type: ignore

            if gt_max:
                return self._clamp(self.max, -1, self.max_open)  # type: ignore

        if lt_min or gt_max:
            self.fail(
                _("{value} is not in the range {range}.").format(
                    value=rv, range=self._describe_range()
                ),
                param,
                ctx,
            )

        return rv

    def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float:
        """Find the valid value to clamp to bound in the given direction.

        :param bound: The boundary value.
        :param dir: 1 or -1 indicating the direction to move.
        :param open: If true, the range does not include the bound.
        """
        raise NotImplementedError

    def _describe_range(self) -> str:
        """Describe the range for use in help text."""
        if self.min is None:
            op = "<" if self.max_open else "<="
            return f"x{op}{self.max}"

        if self.max is None:
            op = ">" if self.min_open else ">="
            return f"x{op}{self.min}"

        lop = "<" if self.min_open else "<="
        rop = "<" if self.max_open else "<="
        return f"{self.min}{lop}x{rop}{self.max}"

    def __repr__(self) -> str:
        clamp = " clamped" if self.clamp else ""
        return f"<{type(self).__name__} {self._describe_range()}{clamp}>"
_NumberRangeBase
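A minimal usage sketch, not part of the record above: it assumes click's public IntRange subclass of this base and a standard click install, and shows the clamp and repr behaviour the base class implements.

# Illustrative sketch only; IntRange is click's concrete subclass of this base.
import click

int_range = click.IntRange(0, 10, clamp=True)
print(int_range.convert("42", None, None))  # out-of-range value is clamped to 10
print(repr(click.IntRange(0, 10)))          # <IntRange 0<=x<=10>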
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_url_hostname_match_with_cert.py
{ "start": 2074, "end": 4637 }
class ____(ColumnMapExpectation): """Expect provided url's hostname match with cert.""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "all_valid": [ "google.com", "index.hu", "yahoo.com", "github.com", "microsoft.com", ], "some_other": [ "google.com", "index.hu", "yahoo.com", "github.com", "wrong.host.badssl.com", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "all_valid"}, "out": { "success": True, }, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "some_other", "mostly": 1}, "out": { "success": False, }, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.match_cert" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": [ "hackathon-22", "experimental", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@szecsip", # Don't forget to add your github handle here! ], } if __name__ == "__main__": ExpectColumnValuesUrlHostnameMatchWithCert().print_diagnostic_checklist()
ExpectColumnValuesUrlHostnameMatchWithCert
python
TheAlgorithms__Python
other/word_search.py
{ "start": 489, "end": 15112 }
class ____: """ >>> ws = WordSearch(WORDS, WIDTH, HEIGHT) >>> ws.board # doctest: +ELLIPSIS [[None, ..., None], ..., [None, ..., None]] >>> ws.generate_board() """ def __init__(self, words: list[str], width: int, height: int) -> None: self.words = words self.width = width self.height = height # Board matrix holding each letter self.board: list[list[str | None]] = [[None] * width for _ in range(height)] def insert_north(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_north("cat", [2], [2]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, None, 't'], [None, None, 'a'], [None, None, 'c']] >>> ws.insert_north("at", [0, 1, 2], [2, 1]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, 't', 't'], [None, 'a', 'a'], [None, None, 'c']] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Check if there is space above the row to fit in the word if word_length > row + 1: continue # Attempt to insert the word into each column for col in cols: # Only check to be made here is if there are existing letters # above the column that will be overwritten letters_above = [self.board[row - i][col] for i in range(word_length)] if all(letter is None for letter in letters_above): # Successful, insert the word north for i in range(word_length): self.board[row - i][col] = word[i] return def insert_northeast(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_northeast("cat", [2], [0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, None, 't'], [None, 'a', None], ['c', None, None]] >>> ws.insert_northeast("at", [0, 1], [2, 1, 0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, 't', 't'], ['a', 'a', None], ['c', None, None]] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Check if there is space for the word above the row if word_length > row + 1: continue # Attempt to insert the word into each column for col in cols: # Check if there is space to the right of the word as well as above if word_length + col > self.width: continue # Check if there are existing letters # to the right of the column that will be overwritten letters_diagonal_left = [ self.board[row - i][col + i] for i in range(word_length) ] if all(letter is None for letter in letters_diagonal_left): # Successful, insert the word northeast for i in range(word_length): self.board[row - i][col + i] = word[i] return def insert_east(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_east("cat", [1], [0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, None, None], ['c', 'a', 't'], [None, None, None]] >>> ws.insert_east("at", [1, 0], [2, 1, 0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, 'a', 't'], ['c', 'a', 't'], [None, None, None]] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Attempt to insert the word into each column for col in cols: # Check if there is space to the right of the word if word_length + col > self.width: continue # Check if there are existing letters # to the right of the column that will be overwritten letters_left = [self.board[row][col + i] for i in range(word_length)] if all(letter is None for letter in letters_left): # Successful, insert the word east for i in range(word_length): self.board[row][col + i] = word[i] return def 
insert_southeast(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_southeast("cat", [0], [0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['c', None, None], [None, 'a', None], [None, None, 't']] >>> ws.insert_southeast("at", [1, 0], [2, 1, 0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['c', None, None], ['a', 'a', None], [None, 't', 't']] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Check if there is space for the word below the row if word_length + row > self.height: continue # Attempt to insert the word into each column for col in cols: # Check if there is space to the right of the word as well as below if word_length + col > self.width: continue # Check if there are existing letters # to the right of the column that will be overwritten letters_diagonal_left = [ self.board[row + i][col + i] for i in range(word_length) ] if all(letter is None for letter in letters_diagonal_left): # Successful, insert the word southeast for i in range(word_length): self.board[row + i][col + i] = word[i] return def insert_south(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_south("cat", [0], [0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['c', None, None], ['a', None, None], ['t', None, None]] >>> ws.insert_south("at", [2, 1, 0], [0, 1, 2]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['c', None, None], ['a', 'a', None], ['t', 't', None]] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Check if there is space below the row to fit in the word if word_length + row > self.height: continue # Attempt to insert the word into each column for col in cols: # Only check to be made here is if there are existing letters # below the column that will be overwritten letters_below = [self.board[row + i][col] for i in range(word_length)] if all(letter is None for letter in letters_below): # Successful, insert the word south for i in range(word_length): self.board[row + i][col] = word[i] return def insert_southwest(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_southwest("cat", [0], [2]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, None, 'c'], [None, 'a', None], ['t', None, None]] >>> ws.insert_southwest("at", [1, 2], [2, 1, 0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, None, 'c'], [None, 'a', 'a'], ['t', 't', None]] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Check if there is space for the word below the row if word_length + row > self.height: continue # Attempt to insert the word into each column for col in cols: # Check if there is space to the left of the word as well as below if word_length > col + 1: continue # Check if there are existing letters # to the right of the column that will be overwritten letters_diagonal_left = [ self.board[row + i][col - i] for i in range(word_length) ] if all(letter is None for letter in letters_diagonal_left): # Successful, insert the word southwest for i in range(word_length): self.board[row + i][col - i] = word[i] return def insert_west(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_west("cat", [1], [2]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [[None, None, None], ['t', 'a', 'c'], [None, 
None, None]] >>> ws.insert_west("at", [1, 0], [1, 2, 0]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['t', 'a', None], ['t', 'a', 'c'], [None, None, None]] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Attempt to insert the word into each column for col in cols: # Check if there is space to the left of the word if word_length > col + 1: continue # Check if there are existing letters # to the left of the column that will be overwritten letters_left = [self.board[row][col - i] for i in range(word_length)] if all(letter is None for letter in letters_left): # Successful, insert the word west for i in range(word_length): self.board[row][col - i] = word[i] return def insert_northwest(self, word: str, rows: list[int], cols: list[int]) -> None: """ >>> ws = WordSearch(WORDS, 3, 3) >>> ws.insert_northwest("cat", [2], [2]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['t', None, None], [None, 'a', None], [None, None, 'c']] >>> ws.insert_northwest("at", [1, 2], [0, 1]) >>> ws.board # doctest: +NORMALIZE_WHITESPACE [['t', None, None], ['t', 'a', None], [None, 'a', 'c']] """ word_length = len(word) # Attempt to insert the word into each row and when successful, exit for row in rows: # Check if there is space for the word above the row if word_length > row + 1: continue # Attempt to insert the word into each column for col in cols: # Check if there is space to the left of the word as well as above if word_length > col + 1: continue # Check if there are existing letters # to the right of the column that will be overwritten letters_diagonal_left = [ self.board[row - i][col - i] for i in range(word_length) ] if all(letter is None for letter in letters_diagonal_left): # Successful, insert the word northwest for i in range(word_length): self.board[row - i][col - i] = word[i] return def generate_board(self) -> None: """ Generates a board with a random direction for each word. >>> wt = WordSearch(WORDS, WIDTH, HEIGHT) >>> wt.generate_board() >>> len(list(filter(lambda word: word is not None, sum(wt.board, start=[]))) ... ) == sum(map(lambda word: len(word), WORDS)) True """ directions = ( self.insert_north, self.insert_northeast, self.insert_east, self.insert_southeast, self.insert_south, self.insert_southwest, self.insert_west, self.insert_northwest, ) for word in self.words: # Shuffle the row order and column order that is used when brute forcing # the insertion of the word rows, cols = list(range(self.height)), list(range(self.width)) shuffle(rows) shuffle(cols) # Insert the word via the direction choice(directions)(word, rows, cols) def visualise_word_search( board: list[list[str | None]] | None = None, *, add_fake_chars: bool = True ) -> None: """ Graphically displays the word search in the terminal. >>> ws = WordSearch(WORDS, 5, 5) >>> ws.insert_north("cat", [4], [4]) >>> visualise_word_search( ... ws.board, add_fake_chars=False) # doctest: +NORMALIZE_WHITESPACE # # # # # # # # # # # # # # t # # # # a # # # # c >>> ws.insert_northeast("snake", [4], [4, 3, 2, 1, 0]) >>> visualise_word_search( ... 
ws.board, add_fake_chars=False) # doctest: +NORMALIZE_WHITESPACE # # # # e # # # k # # # a # t # n # # a s # # # c """ if board is None: word_search = WordSearch(WORDS, WIDTH, HEIGHT) word_search.generate_board() board = word_search.board result = "" for row in range(len(board)): for col in range(len(board[0])): character = "#" if (letter := board[row][col]) is not None: character = letter # Empty char, so add a fake char elif add_fake_chars: character = chr(randint(97, 122)) result += f"{character} " result += "\n" print(result, end="") if __name__ == "__main__": import doctest doctest.testmod() visualise_word_search()
WordSearch
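A minimal usage sketch for the record above, assuming WordSearch and visualise_word_search are importable exactly as defined there; the word list and grid size below are made up for the demo.

# Illustrative only; relies on the module defined in the record above.
from random import seed

seed(0)  # make the random placement repeatable for the demo
words = ["cat", "dog", "snake"]  # hypothetical word list
ws = WordSearch(words, 8, 8)
ws.generate_board()
visualise_word_search(ws.board, add_fake_chars=False)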
python
django__django
tests/test_utils/tests.py
{ "start": 80868, "end": 83536 }
class ____(SimpleTestCase):
    def test_disallowed_database_connections(self):
        expected_message = (
            "Database connections to 'default' are not allowed in SimpleTestCase "
            "subclasses. Either subclass TestCase or TransactionTestCase to "
            "ensure proper test isolation or add 'default' to "
            "test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
            "silence this failure."
        )
        with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
            connection.connect()
        with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
            connection.temporary_connection()

    def test_disallowed_database_queries(self):
        expected_message = (
            "Database queries to 'default' are not allowed in SimpleTestCase "
            "subclasses. Either subclass TestCase or TransactionTestCase to "
            "ensure proper test isolation or add 'default' to "
            "test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
            "silence this failure."
        )
        with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
            Car.objects.first()

    def test_disallowed_database_chunked_cursor_queries(self):
        expected_message = (
            "Database queries to 'default' are not allowed in SimpleTestCase "
            "subclasses. Either subclass TestCase or TransactionTestCase to "
            "ensure proper test isolation or add 'default' to "
            "test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
            "silence this failure."
        )
        with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
            next(Car.objects.iterator())

    def test_disallowed_thread_database_connection(self):
        expected_message = (
            "Database threaded connections to 'default' are not allowed in "
            "SimpleTestCase subclasses. Either subclass TestCase or TransactionTestCase"
            " to ensure proper test isolation or add 'default' to "
            "test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
            "silence this failure."
        )
        exceptions = []

        def thread_func():
            try:
                Car.objects.first()
            except DatabaseOperationForbidden as e:
                exceptions.append(e)

        t = threading.Thread(target=thread_func)
        t.start()
        t.join()
        self.assertEqual(len(exceptions), 1)
        self.assertEqual(exceptions[0].args[0], expected_message)
DisallowedDatabaseQueriesTests
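The failure messages in the record above point at the `databases` opt-in; this is a hedged sketch (class name and import path are hypothetical, not from the record) of how a SimpleTestCase subclass would silence that error.

from django.test import SimpleTestCase

class AllowedDatabaseQueriesSketch(SimpleTestCase):
    # Declaring the alias opts this SimpleTestCase subclass into real queries.
    databases = {"default"}

    def test_query_is_allowed(self):
        from .models import Car  # hypothetical test-app model import
        Car.objects.first()  # no DatabaseOperationForbidden is raised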
python
readthedocs__readthedocs.org
readthedocs/subscriptions/forms.py
{ "start": 218, "end": 1075 }
class ____(forms.Form):
    """Form to create a subscription after the previous one has ended."""

    plan = forms.ChoiceField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        products_id = [product.stripe_id for product in get_listed_products()]
        stripe_prices = (
            djstripe.Price.objects.filter(product__id__in=products_id, active=True)
            .select_related("product")
            .order_by("unit_amount")
        )
        self.fields["plan"].choices = [
            (price.id, f"{price.product.name} ({price.human_readable_price})")
            for price in stripe_prices
        ]
        self.fields["plan"].help_text = _(
            'Check our <a href="https://about.readthedocs.com/pricing/">pricing page</a> '
            "for more information about each plan."
        )
PlanForm
python
davidhalter__jedi
test/completion/async_.py
{ "start": 1472, "end": 1776 }
class ____:
    def some_method():
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, *args):
        pass


async def asyncctxmgr():
    async with AsyncCtxMgr() as acm:
        #? AsyncCtxMgr()
        acm
        #? ['some_method']
        acm.som
AsyncCtxMgr
python
kamyu104__LeetCode-Solutions
Python/spiral-matrix-iv.py
{ "start": 172, "end": 828 }
class ____(object):
    def spiralMatrix(self, m, n, head):
        """
        :type m: int
        :type n: int
        :type head: Optional[ListNode]
        :rtype: List[List[int]]
        """
        directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
        result = [[-1]*n for _ in xrange(m)]
        i = j = d = 0
        while head:
            result[i][j] = head.val
            if not (0 <= i+directions[d][0] < m and
                    0 <= j+directions[d][1] < n and
                    result[i+directions[d][0]][j+directions[d][1]] == -1):
                d = (d+1)%4
            i, j = i+directions[d][0], j+directions[d][1]
            head = head.next
        return result
Solution
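A quick way to exercise the record above; ListNode here is a minimal stand-in for the LeetCode definition the solution assumes, not part of the record.

class ListNode(object):  # minimal stand-in for LeetCode's ListNode
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

head = None
for v in reversed([0, 1, 2]):  # build the list 0 -> 1 -> 2
    head = ListNode(v, head)

print(Solution().spiralMatrix(1, 4, head))  # [[0, 1, 2, -1]]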
python
spack__spack
lib/spack/spack/vendor/jinja2/nativetypes.py
{ "start": 2703, "end": 3969 }
class ____(Template):
    environment_class = NativeEnvironment

    def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Render the template to produce a native Python type. If the
        result is a single node, its value is returned. Otherwise, the
        nodes are concatenated as strings. If the result can be parsed
        with :func:`ast.literal_eval`, the parsed value is returned.
        Otherwise, the string is returned.
        """
        ctx = self.new_context(dict(*args, **kwargs))

        try:
            return native_concat(self.root_render_func(ctx))  # type: ignore
        except Exception:
            return self.environment.handle_exception()

    async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        if not self.environment.is_async:
            raise RuntimeError(
                "The environment was not created with async mode enabled."
            )

        ctx = self.new_context(dict(*args, **kwargs))

        try:
            return native_concat(
                [n async for n in self.root_render_func(ctx)]  # type: ignore
            )
        except Exception:
            return self.environment.handle_exception()


NativeEnvironment.template_class = NativeTemplate
NativeTemplate
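A small usage sketch of the native-rendering behaviour the record's docstring describes, assuming a plain jinja2 install rather than Spack's vendored copy.

from jinja2.nativetypes import NativeEnvironment

env = NativeEnvironment()
result = env.from_string("{{ x + 1 }}").render(x=4)
print(type(result).__name__, result)  # int 5 -- the result is a Python int, not a string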
python
getsentry__sentry
src/sentry/models/options/project_template_option.py
{ "start": 3378, "end": 4138 }
class ____(Model):
    """
    A ProjectTemplateOption is a templated version of a project

    It is used to store the values that are shared between
    different projects across the organization.
    """

    __relocation_scope__ = RelocationScope.Organization

    project_template = FlexibleForeignKey("sentry.ProjectTemplate", related_name="options")

    key = models.CharField(max_length=64)
    value = models.JSONField(null=True)

    objects: ClassVar[ProjectTemplateOptionManager] = ProjectTemplateOptionManager()

    class Meta:
        app_label = "sentry"
        db_table = "sentry_projecttemplateoption"
        unique_together = (("project_template", "key"),)

    __repr__ = sane_repr("project_template_id", "key", "value")
ProjectTemplateOption
python
google__pytype
pytype/pyi/modules.py
{ "start": 322, "end": 539 }
class ____: """Result of processing an import statement.""" pytd_node: Any name: str new_name: str qualified_name: str = "" def pytd_alias(self): return pytd.Alias(self.new_name, self.pytd_node)
Import
python
django__django
tests/forms_tests/widget_tests/test_multiwidget.py
{ "start": 1608, "end": 2293 }
class ____(MultiWidget):
    """
    Used to test MultiWidget.__deepcopy__().
    """

    def __init__(self, choices=[]):
        widgets = [
            RadioSelect(choices=choices),
            TextInput,
        ]
        super().__init__(widgets)

    def _set_choices(self, choices):
        """
        When choices are set for this widget, we want to pass those along to
        the Select widget.
        """
        self.widgets[0].choices = choices

    def _get_choices(self):
        """
        The choices for this widget are the Select widget's choices.
        """
        return self.widgets[0].choices

    choices = property(_get_choices, _set_choices)
DeepCopyWidget
python
pennersr__django-allauth
tests/apps/socialaccount/providers/robinhood/tests.py
{ "start": 246, "end": 594 }
class ____(OAuth2TestsMixin, TestCase):
    provider_id = RobinhoodProvider.id

    def get_mocked_response(self):
        return MockedResponse(
            HTTPStatus.OK,
            """
            {
                "username": "test_username",
                "id": "1234-5678-910"
            }
            """,
        )

    def get_expected_to_str(self):
        return "test_username"
RobinhoodTests
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_are_in_language.py
{ "start": 1903, "end": 6539 }
class ____(ColumnMapExpectation): """Expect the column to be in a specified language. Args: column (str): \ The column name language (str): \ One of 97 ISO 639-1 language codes, e.g. af, am, an, ar, as, az, be, bg, bn, br, bs, ca, cs, cy, da, \ de, dz, el, en, eo, es, et, eu, fa, fi, fo, fr, ga, gl, gu, he, hi, hr, ht, hu, hy, id, is, it, ja, \ jv, ka, kk, km, kn, ko, ku, ky, la, lb, lo, lt, lv, mg, mk, ml, mn, mr, ms, mt, nb, ne, nl, nn, no, \ oc, or, pa, pl, ps, pt, qu, ro, ru, rw, se, si, sk, sl, sq, sr, sv, sw, ta, te, th, tl, tr, ug, uk, \ ur, vi, vo, wa, xh, zh, zu Notes: * Language identification uses the [langid package](https://github.com/saffsd/langid.py). * langid uses a custom, permissive [LICENSE](https://github.com/saffsd/langid.py/blob/master/LICENSE), \ suitable for commercial purposes. * Results may be inaccurate for strings shorter than 50 characters. * No confidence threshold has been set, so language with the highest confidence will be selected, even if \ confidence is low. """ # These examples will be shown in the public gallery, and also executed as unit tests for your Expectation examples = [ { "data": { "mostly_english": [ "Twinkle, twinkle, little star. How I wonder what you are.", "Up above the world so high, Like a diamond in the sky.", "Twinkle, twinkle, little star. Up above the world so high.", "Brilla brilla pequeña estrella. Cómo me pregunto lo que eres.", None, ], "mostly_spanish": [ "Brilla brilla pequeña estrella. Cómo me pregunto lo que eres.", "Por encima del mundo tan alto, Como un diamante en el cielo.", "Brilla brilla pequeña estrella. Por encima del mundo tan arriba.", "Twinkle, twinkle, little star. How I wonder what you are.", None, ], }, "tests": [ { "title": "positive_test_with_mostly_english", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "mostly_english", "language": "en", "mostly": 0.6}, "out": { "success": True, }, }, { "title": "negative_test_with_mostly_english", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "mostly_spanish", "language": "en", "mostly": 0.6}, "out": { "success": False, }, }, { "title": "positive_test_with_mostly_spanish", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "mostly_spanish", "language": "es", "mostly": 0.6}, "out": { "success": True, }, }, ], } ] # This dictionary contains metadata for display in the public gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": ["nlp", "hackathon"], "contributors": ["@victorwyee"], "requirements": ["langid>=1.1.6"], } # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.are_in_language" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False # Please see {some doc} for more information about domain and success keys, and other arguments to Expectations success_keys = ( "language", "mostly", ) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This method defines a question Renderer # For more info on Renderers, see {some doc} #!!! This example renderer should render RenderedStringTemplateContent, not just a string if __name__ == "__main__": ExpectColumnValuesAreInLanguage().print_diagnostic_checklist()
ExpectColumnValuesAreInLanguage
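The record above documents an Expectation that detects the language of each string via langid. As a rough, hedged sketch of the underlying per-row check (not the Expectation's actual Metric implementation; the helper name and the way `mostly` is applied are illustrative assumptions), assuming the langid package is installed and using its classify() API, which returns a (language_code, score) pair:

# Illustrative sketch only -- not the ColumnMapExpectation's real Metric.
# Assumes the `langid` package: langid.classify(text) -> (lang_code, score).
import langid

def fraction_in_language(values, language):
    """Fraction of non-null strings whose detected language is `language`."""
    codes = [langid.classify(v)[0] for v in values if v is not None]
    return sum(c == language for c in codes) / len(codes) if codes else 0.0

rows = [
    "Twinkle, twinkle, little star. How I wonder what you are.",
    "Up above the world so high, Like a diamond in the sky.",
    "Brilla brilla pequeña estrella. Cómo me pregunto lo que eres.",
    None,
]
# With mostly=0.6 the expectation passes iff this fraction is at least 0.6.
print(fraction_in_language(rows, "en"))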
python
kamyu104__LeetCode-Solutions
Python/diameter-of-binary-tree.py
{ "start": 184, "end": 1113 }
class ____(object): def diameterOfBinaryTree(self, root): """ :type root: TreeNode :rtype: int """ def iter_dfs(node): result = 0 stk = [(1, [node, [0]])] while stk: step, params = stk.pop() if step == 1: node, ret = params if not node: continue ret1, ret2 = [0], [0] stk.append((2, [node, ret1, ret2, ret])) stk.append((1, [node.right, ret2])) stk.append((1, [node.left, ret1])) elif step == 2: node, ret1, ret2, ret = params result = max(result, ret1[0]+ret2[0]) ret[0] = 1+max(ret1[0], ret2[0]) return result return iter_dfs(root) # Time: O(n) # Space: O(h)
Solution
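The solution above runs a post-order traversal with an explicit two-phase stack. For readers who find the stack protocol opaque, here is a hedged recursive sketch of the same idea (the diameter, in edges, is the largest left-height plus right-height seen at any node); the TreeNode class is included only to keep the sketch self-contained.

# Recursive sketch of the same algorithm as the iterative version above.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def diameter_of_binary_tree(root):
    best = [0]
    def height(node):                          # longest downward path, in nodes
        if not node:
            return 0
        left, right = height(node.left), height(node.right)
        best[0] = max(best[0], left + right)   # edges on the path through node
        return 1 + max(left, right)
    height(root)
    return best[0]

# Longest path 4 -> 2 -> 1 -> 3 has three edges.
root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
print(diameter_of_binary_tree(root))  # 3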
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_numeric.py
{ "start": 77510, "end": 77841 }
class ____(TestCase): def test_basic(self): A = np.array([1, 1.0j, -1, -1.0j]) real_var = 1 assert_almost_equal(np.var(A), real_var) assert_almost_equal(np.std(A) ** 2, real_var) def test_scalars(self): assert_equal(np.var(1j), 0) assert_equal(np.std(1j), 0)
TestStdVarComplex
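The record above checks NumPy's variance and standard deviation for complex input. The property under test, as a short worked sketch: the variance of complex data is the mean of |x - mean(x)|**2, so the result is real-valued and std()**2 equals var().

# Worked example of the property the test asserts.
import numpy as np

A = np.array([1, 1.0j, -1, -1.0j])   # mean is 0 and every |x|**2 is 1
print(np.var(A))        # 1.0
print(np.std(A) ** 2)   # 1.0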
python
tornadoweb__tornado
tornado/routing.py
{ "start": 890, "end": 6268 }
class ____ match on more criteria than `.Application`, or the `Router` interface can be subclassed for maximum customization. `Router` interface extends `~.httputil.HTTPServerConnectionDelegate` to provide additional routing capabilities. This also means that any `Router` implementation can be used directly as a ``request_callback`` for `~.httpserver.HTTPServer` constructor. `Router` subclass must implement a ``find_handler`` method to provide a suitable `~.httputil.HTTPMessageDelegate` instance to handle the request: .. code-block:: python class CustomRouter(Router): def find_handler(self, request, **kwargs): # some routing logic providing a suitable HTTPMessageDelegate instance return MessageDelegate(request.connection) class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": "2"}), b"OK") self.connection.finish() router = CustomRouter() server = HTTPServer(router) The main responsibility of `Router` implementation is to provide a mapping from a request to `~.httputil.HTTPMessageDelegate` instance that will handle this request. In the example above we can see that routing is possible even without instantiating an `~.web.Application`. For routing to `~.web.RequestHandler` implementations we need an `~.web.Application` instance. `~.web.Application.get_handler_delegate` provides a convenient way to create `~.httputil.HTTPMessageDelegate` for a given request and `~.web.RequestHandler`. Here is a simple example of how we can we route to `~.web.RequestHandler` subclasses by HTTP method: .. code-block:: python resources = {} class GetResource(RequestHandler): def get(self, path): if path not in resources: raise HTTPError(404) self.finish(resources[path]) class PostResource(RequestHandler): def post(self, path): resources[path] = self.request.body class HTTPMethodRouter(Router): def __init__(self, app): self.app = app def find_handler(self, request, **kwargs): handler = GetResource if request.method == "GET" else PostResource return self.app.get_handler_delegate(request, handler, path_args=[request.path]) router = HTTPMethodRouter(Application()) server = HTTPServer(router) `ReversibleRouter` interface adds the ability to distinguish between the routes and reverse them to the original urls using route's name and additional arguments. `~.web.Application` is itself an implementation of `ReversibleRouter` class. `RuleRouter` and `ReversibleRuleRouter` are implementations of `Router` and `ReversibleRouter` interfaces and can be used for creating rule-based routing configurations. Rules are instances of `Rule` class. They contain a `Matcher`, which provides the logic for determining whether the rule is a match for a particular request and a target, which can be one of the following. 1) An instance of `~.httputil.HTTPServerConnectionDelegate`: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/handler"), ConnectionDelegate()), # ... more rules ]) class ConnectionDelegate(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): return MessageDelegate(request_conn) 2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/callable"), request_callable) ]) def request_callable(request): request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK") request.finish() 3) Another `Router` instance: .. 
code-block:: python router = RuleRouter([ Rule(PathMatches("/router.*"), CustomRouter()) ]) Of course a nested `RuleRouter` or a `~.web.Application` is allowed: .. code-block:: python router = RuleRouter([ Rule(HostMatches("example.com"), RuleRouter([ Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])), ])) ]) server = HTTPServer(router) In the example below `RuleRouter` is used to route between applications: .. code-block:: python app1 = Application([ (r"/app1/handler", Handler1), # other handlers ... ]) app2 = Application([ (r"/app2/handler", Handler2), # other handlers ... ]) router = RuleRouter([ Rule(PathMatches("/app1.*"), app1), Rule(PathMatches("/app2.*"), app2) ]) server = HTTPServer(router) For more information on application-level routing see docs for `~.web.Application`. .. versionadded:: 4.5 """ import re from functools import partial from tornado import httputil from tornado.httpserver import _CallableAdapter from tornado.escape import url_escape, url_unescape, utf8 from tornado.log import app_log from tornado.util import basestring_type, import_object, re_unescape, unicode_type from typing import ( Any, Union, Optional, Awaitable, List, Dict, Pattern, Tuple, overload, Sequence, )
can
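The routing docstring above demonstrates forward dispatch but only mentions URL reversal in passing. As a hedged sketch of that direction (assuming Tornado is importable; the rule name and pattern are made up for illustration), a named Rule inside a ReversibleRuleRouter can be turned back into a concrete path with reverse_url():

# Sketch of URL reversal with a named rule.
from tornado.routing import PathMatches, ReversibleRuleRouter, Rule
from tornado.web import Application

router = ReversibleRuleRouter([
    Rule(PathMatches(r"/users/(\d+)"), Application(), name="user_detail"),
])
# PathMatches rebuilds the path by substituting arguments for capture groups.
print(router.reverse_url("user_detail", 42))  # /users/42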
python
google__jax
tests/pallas/tpu_pallas_test.py
{ "start": 25919, "end": 57686 }
class ____(PallasBaseTest): def setUp(self): super().setUp() if not jtu.is_device_tpu_at_least(4): self.skipTest('DMAs not supported on TPU generations <= 3') def test_can_have_unspecified_memory_spaces(self): def kernel(x_ref, y_ref): # Just test whether things compile del x_ref, y_ref x = jnp.ones((8, 128), dtype=jnp.float32) y = self.pallas_call( kernel, in_specs=[pl.BlockSpec(memory_space=pl.ANY)], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) jax.block_until_ready(y) def test_run_scoped_tracks_effects(self): def kernel(x_ref, y_ref): def body(temp_ref): temp_ref[...] = jnp.ones_like(temp_ref) x_ref[...] = 4 * y_ref[...] + temp_ref[...] pl.run_scoped(body, pltpu.VMEM((8,), jnp.float32)) return [] jaxpr, _, _ = pe.trace_to_jaxpr_dynamic( wrap_init(kernel, 2), [ state.shaped_array_ref((8,), jnp.float32), state.shaped_array_ref((8,), jnp.float32), ], ) expected_effects = {state.ReadEffect(1), state.WriteEffect(0)} self.assertSetEqual(jaxpr.effects, expected_effects) def test_scoped_allocation(self): def kernel(y_ref): def body(x_ref): x_ref[...] = jnp.ones_like(x_ref) y_ref[...] = 4 * x_ref[...] pl.run_scoped(body, pltpu.VMEM((8, 128), jnp.float32)) o = self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )() np.testing.assert_allclose(o, 4 * np.ones_like(o)) def test_run_scoped_can_return_scalar_value(self): def kernel(y_ref): def body(x_ref): x_ref[0] = 0 x_ref[0] += 1 return x_ref[0] + 2 out = pl.run_scoped(body, pltpu.SMEM((1,), jnp.int32)) y_ref[0] = out o = self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, out_specs=pl.BlockSpec(memory_space=pltpu.SMEM), ), out_shape=jax.ShapeDtypeStruct((1,), jnp.int32), )() np.testing.assert_allclose(o, jnp.array([3], jnp.int32)) def test_run_scoped_can_return_scalar_values(self): def kernel(y_ref): def body(x_ref): x_ref[0] = 0 x_ref[0] += 1 return x_ref[0] + 2, x_ref[0] out = pl.run_scoped(body, pltpu.SMEM((1,), jnp.int32)) y_ref[0], y_ref[1] = out o = self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, out_specs=pl.BlockSpec(memory_space=pltpu.SMEM), ), out_shape=jax.ShapeDtypeStruct((2,), jnp.int32), )() np.testing.assert_allclose(o, jnp.array([3, 1], jnp.int32)) def test_run_scoped_can_return_vector_values(self): def kernel(y_ref): def body(x_ref): x_ref[...] = jnp.ones_like(x_ref) return x_ref[...] + 1 out = pl.run_scoped(body, pltpu.VMEM((16, 128), jnp.int32)) y_ref[...] = out o = self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), ), out_shape=jax.ShapeDtypeStruct((16, 128), jnp.int32), )() np.testing.assert_allclose(o, jnp.full((16, 128), 2, dtype=jnp.int32)) def test_run_scoped_can_return_padded_vector_values(self): def kernel(y_ref): def body(x_ref): x_ref[...] = jnp.ones_like(x_ref) return x_ref[...] + 1 out = pl.run_scoped(body, pltpu.VMEM((17, 128), jnp.int32)) y_ref[...] = out o = self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), ), out_shape=jax.ShapeDtypeStruct((17, 128), jnp.int32), )() np.testing.assert_allclose(o, jnp.full((17, 128), 2, dtype=jnp.int32)) def test_nested_scoped_allocation(self): def kernel(y_ref): def body(x_ref): x_ref[...] = jnp.zeros_like(x_ref) def inner_body(z_ref): z_ref[...] = jnp.ones_like(z_ref) x_ref[...] = z_ref[...] 
pl.run_scoped(inner_body, pltpu.VMEM((8, 128), jnp.float32)) y_ref[...] = 4 * x_ref[...] pl.run_scoped(body, pltpu.VMEM((8, 128), jnp.float32)) o = self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )() np.testing.assert_allclose(o, 4 * np.ones_like(o)) def test_run_scoped_partial_discharge(self): def f(a_ref, b_ref): def scope(): a_ref[...] = jnp.ones(4, jnp.float32) b_ref[...] = jnp.ones(4, jnp.float32) return [] pl.run_scoped(scope) return [] aref1 = state.AbstractRef(jax.core.ShapedArray((4,), jnp.dtype('float32'))) aref2 = state.AbstractRef(jax.core.ShapedArray((4,), jnp.dtype('float32'))) in_avals = [aref1, aref2] stateful_jaxpr, _, () = pe.trace_to_jaxpr_dynamic(wrap_init(f, 2), in_avals) discharged_jaxpr, _ = state_discharge.discharge_state( stateful_jaxpr, consts=(), should_discharge=[False, True]) self.assertLen(discharged_jaxpr.invars, 2) self.assertLen(discharged_jaxpr.outvars, 1) self.assertIsInstance(discharged_jaxpr.invars[0].aval, state.AbstractRef) self.assertIsInstance(discharged_jaxpr.invars[1].aval, jax.core.ShapedArray) self.assertEqual(discharged_jaxpr.effects, {state.WriteEffect(0)}) def test_can_allocate_semaphore(self): def kernel(y_ref): def body(sem1): pass pl.run_scoped(body, pltpu.SemaphoreType.DMA) jax.block_until_ready(self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )()) def test_can_allocate_multiple_semaphores(self): def kernel(y_ref): def body(sem1, sem2): pass pl.run_scoped(body, pltpu.SemaphoreType.DMA, pltpu.SemaphoreType.REGULAR) jax.block_until_ready(self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )()) def test_can_allocate_semaphore_array(self): def kernel(y_ref): def body(dma_sems, sems): self.assertTupleEqual(dma_sems.shape, (4,)) self.assertTupleEqual(sems.shape, (3,)) self.assertTrue(jnp.issubdtype(dma_sems.dtype, pltpu.dma_semaphore)) self.assertTrue(jnp.issubdtype(sems.dtype, pltpu.semaphore)) pl.run_scoped( body, pltpu.SemaphoreType.DMA((4,)), pltpu.SemaphoreType.REGULAR((3,)) ) jax.block_until_ready(self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )()) def test_can_allocate_scratch_semaphore_array(self): def kernel(y_ref, dma_sems, sems): self.assertTupleEqual(dma_sems.shape, (4,)) self.assertTupleEqual(sems.shape, (3,)) self.assertTrue(jnp.issubdtype(dma_sems.dtype, pltpu.dma_semaphore)) self.assertTrue(jnp.issubdtype(sems.dtype, pltpu.semaphore)) jax.block_until_ready( self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, scratch_shapes=[ pltpu.SemaphoreType.DMA((4,)), pltpu.SemaphoreType.REGULAR((3,)), ], ), )() ) def test_can_wait_on_semaphore(self): def kernel(y_ref): def body(sem): pltpu.semaphore_signal(sem) pltpu.semaphore_wait(sem) pl.run_scoped(body, pltpu.SemaphoreType.REGULAR) def body2(sem): pltpu.semaphore_signal(sem, 2) pltpu.semaphore_wait(sem) pltpu.semaphore_wait(sem) pl.run_scoped(body2, pltpu.SemaphoreType.REGULAR) def body3(sem): pltpu.semaphore_signal(sem) pltpu.semaphore_signal(sem) pltpu.semaphore_signal(sem) pltpu.semaphore_wait(sem) pltpu.semaphore_wait(sem) pltpu.semaphore_wait(sem) pl.run_scoped(body3, pltpu.SemaphoreType.REGULAR) # TODO(b/345534352): Add interpret support for semaphore signal/wait. 
jax.block_until_ready(self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )()) def test_can_wait_on_semaphore_array(self): def kernel(y_ref): def body(sems): pltpu.semaphore_signal(sems.at[0]) pltpu.semaphore_wait(sems.at[0]) pltpu.semaphore_signal(sems.at[1], 2) pltpu.semaphore_wait(sems.at[1]) pltpu.semaphore_wait(sems.at[1]) pltpu.semaphore_signal(sems.at[2]) pltpu.semaphore_signal(sems.at[2]) pltpu.semaphore_signal(sems.at[2]) pltpu.semaphore_wait(sems.at[2]) pltpu.semaphore_wait(sems.at[2]) pltpu.semaphore_wait(sems.at[2]) pl.run_scoped(body, pltpu.SemaphoreType.REGULAR((3,))) # TODO(b/345534352): Add interpret support for semaphore signal/wait. jax.block_until_ready(self.pallas_call( kernel, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )()) def test_can_wait_on_semaphore_array_with_dynamic_index(self): def kernel(y_ref): i = pl.program_id(0) def body(sems): pltpu.semaphore_signal(sems.at[i, 0]) pltpu.semaphore_wait(sems.at[i, 0]) pltpu.semaphore_signal(sems.at[i, 1], 2) pltpu.semaphore_wait(sems.at[i, 1]) pltpu.semaphore_wait(sems.at[i, 1]) pltpu.semaphore_signal(sems.at[i, 2]) pltpu.semaphore_signal(sems.at[i, 2]) pltpu.semaphore_signal(sems.at[i, 2]) pltpu.semaphore_wait(sems.at[i, 2]) pltpu.semaphore_wait(sems.at[i, 2]) pltpu.semaphore_wait(sems.at[i, 2]) pl.run_scoped(body, pltpu.SemaphoreType.REGULAR((4, 3))) jax.block_until_ready( self.pallas_call( kernel, in_specs=[], out_specs=pl.BlockSpec((8, 128), lambda i: (0, 0)), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), grid=4, )() ) def test_can_read_semaphore(self): m, n = 2, 3 def kernel(y_ref): def body(sems): for r in range(m): for c in range(n): v = r * n + c pltpu.semaphore_signal(sems.at[r, c],v) y_ref[r, c] = pltpu.semaphore_read(sems.at[r, c]) pltpu.semaphore_wait(sems.at[r, c], v) pl.run_scoped(body, pltpu.SemaphoreType.REGULAR((m, n))) y = jax.block_until_ready( self.pallas_call( kernel, out_specs=pl.BlockSpec(memory_space=pltpu.SMEM), out_shape=jax.ShapeDtypeStruct((m, n), jnp.int32), )() ) np.testing.assert_array_equal( y, jnp.arange(m * n).astype(jnp.int32).reshape((m, n)) ) def test_can_read_dma_semaphore(self): def kernel(x_hbm_ref, y_hbm_ref, sem_val_ref, dma_sem): sem_val_ref[0, 0] = 123 pltpu.async_copy(x_hbm_ref, y_hbm_ref, dma_sem).wait() sem_val_ref[0, 0] = pltpu.semaphore_read(dma_sem) x = jnp.arange(8 * 128, dtype=jnp.int32).reshape((8, 128)) y, sem_val = jax.block_until_ready( self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, in_specs=[pl.BlockSpec(memory_space=pl.ANY)], out_specs=[ pl.BlockSpec(memory_space=pl.ANY), pl.BlockSpec(memory_space=pltpu.SMEM), ], scratch_shapes=[pltpu.SemaphoreType.DMA], ), out_shape=[ jax.ShapeDtypeStruct((8, 128), jnp.int32), jax.ShapeDtypeStruct((1, 1), jnp.int32), ], )(x) ) np.testing.assert_array_equal(y, x) np.testing.assert_array_equal(sem_val, 0) def test_set_dma_priority(self): if jtu.get_tpu_version() < 5: self.skipTest('Target does not support DMA prefetch between HBM and VMEM') def kernel(x1, x2, y1, y2, scratch1, scratch2, sem1, sem2): copy1 = pltpu.async_copy(x1, scratch1, sem1, priority=1) copy2 = pltpu.async_copy(x2, scratch2, sem2, priority=0) copy1.wait() copy2.wait() copy1 = pltpu.async_copy(scratch1, y1, sem1, priority=0) copy2 = pltpu.async_copy(scratch2, y2, sem2, priority=1) copy1.wait() copy2.wait() shape = (8, 128) dtype = jnp.int32 x1 = jnp.arange(np.prod(shape), dtype=dtype).reshape(shape) x2 = x1 + 1 y1, y2 = self.pallas_call( kernel, 
grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, in_specs=[pl.BlockSpec(memory_space=pl.ANY)] * 2, scratch_shapes=[pltpu.VMEM(shape, dtype)] * 2 + [pltpu.SemaphoreType.DMA] * 2, out_specs=[pl.BlockSpec(memory_space=pl.ANY)] * 2, ), out_shape=[jax.ShapeDtypeStruct(shape, dtype)] * 2, )(x1, x2) np.testing.assert_array_equal(y1, x1) np.testing.assert_array_equal(y2, x2) def test_hbm_hbm_dma(self): def kernel(x_hbm_ref, y_hbm_ref): def body(sem): pltpu.async_copy(x_hbm_ref.at[:8, :], y_hbm_ref.at[:, :128], sem).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_array_equal(y, x) def test_host_input_host_to_hbm_dma(self): if self.INTERPRET: self.skipTest('Interpret mode does not support host memory.') if jax.device_count() > 1: self.skipTest("Test only works with a single device.") def kernel(x_host_ref, y_hbm_ref): def body(sem): pltpu.async_copy(x_host_ref, y_hbm_ref, sem).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(8 * 128.0).reshape((8, 128)) # Move input to the host. x = jax.device_put( x, jax.sharding.NamedSharding( jax.sharding.Mesh(jax.devices(), 'x'), jax.sharding.PartitionSpec(), memory_kind='pinned_host', ), ) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.HOST), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_array_equal(y, x) def test_hbm_to_host_host_output_dma(self): if jax.device_count() > 1: self.skipTest("Test only works with a single device.") def kernel(y_hbm_ref, x_host_ref): def body(sem): pltpu.async_copy(y_hbm_ref, x_host_ref, sem).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) host_sharding = jax.sharding.NamedSharding( jax.sharding.Mesh(jax.devices(), 'x'), jax.sharding.PartitionSpec(), memory_kind='pinned_host', ) x = jnp.arange(8 * 128.0).reshape((8, 128)) @functools.partial(jax.jit, out_shardings=host_sharding) def f(x): return self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pl.HOST), out_shape=pltpu.HOST(shape=(8, 128), dtype=jnp.float32), )(x) y = f(x) np.testing.assert_array_equal(y, x) def test_cannot_dma_with_nonscalar_semaphore_ref(self): def kernel(x_hbm_ref, y_hbm_ref): def body(sem): pltpu.async_copy(x_hbm_ref.at[pl.ds(8), :], y_hbm_ref.at[:, pl.ds(128)], sem).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA((1,))) with self.assertRaisesRegex(ValueError, 'Cannot signal'): x = jnp.arange(8 * 128.).reshape((8, 128)) self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) def test_dma_with_scalar_semaphore_ref(self): def kernel(x_hbm_ref, y_hbm_ref): def body(sem): pltpu.async_copy(x_hbm_ref.at[pl.ds(8), :], y_hbm_ref.at[:, pl.ds(128)], sem.at[0]).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA((1,))) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_array_equal(y, x) def test_output_dma_semaphore_ref(self): if self.INTERPRET: self.skipTest('TODO(sharadmv, justinfu): Add interpret support for DMA.') 
def kernel(x_hbm_ref, y_hbm_ref, sem_out): pltpu.make_async_copy( x_hbm_ref.at[pl.ds(8), :], y_hbm_ref.at[:, pl.ds(128)], sem_out ).start() def kernel2(x_hbm_ref, y_hbm_ref, sem_in, y_hbm_out): del y_hbm_out pltpu.make_async_copy( x_hbm_ref.at[pl.ds(8), :], y_hbm_ref.at[:, pl.ds(128)], sem_in ).wait() x = jnp.arange(8 * 128.0).reshape((8, 128)) @jax.jit def body(x): y, sem_out = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=[ pl.BlockSpec(memory_space=pl.ANY), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ], out_shape=[ jax.ShapeDtypeStruct((8, 128), jnp.float32), pltpu.SemaphoreType.DMA, ], )(x) y = self.pallas_call( kernel2, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), pl.BlockSpec(memory_space=pl.ANY), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), input_output_aliases={1: 0}, )(x, y, sem_out) return y np.testing.assert_array_equal(body(x), x) def test_hbm_hbm_grid_dma(self): # When using the grid, we have to emit Mosaic window_params. Test that they # work correctly with ANY memory space operands. def kernel(x_hbm_ref, y_hbm_ref): i = pl.program_id(0) def body(sem): pltpu.async_copy( x_hbm_ref.at[pl.ds(i, 1)], y_hbm_ref.at[pl.ds(i, 1)], sem ).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(2 * 8 * 128.).reshape((2, 8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((2, 8, 128), jnp.float32), grid=(2,), )(x) np.testing.assert_allclose(y, x) def test_hbm_vmem_dma(self): def kernel(x_hbm_ref, y_ref): def body(x_ref, sem): pltpu.async_copy(x_hbm_ref.at[pl.ds(8), :], x_ref.at[:, pl.ds(128)], sem).wait() y_ref[...] = x_ref[...] pl.run_scoped( body, pltpu.VMEM((8, 128), jnp.float32), pltpu.SemaphoreType.DMA ) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x) def test_vmem_hbm_dma(self): def kernel(x_ref, y_hbm_ref): def body(y_ref, sem): y_ref[...] = x_ref[...] pltpu.async_copy(y_ref, y_hbm_ref, sem).wait() pl.run_scoped( body, pltpu.VMEM((8, 128), jnp.float32), pltpu.SemaphoreType.DMA ) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x) def test_vmem_hbm_vmem_dma(self): def kernel(x_hbm_ref, y_hbm_ref): def body(x_ref, y_ref, sem): pltpu.async_copy(x_hbm_ref, x_ref, sem).wait() y_ref[...] = x_ref[...] pltpu.async_copy(y_ref, y_hbm_ref, sem).wait() pl.run_scoped( body, pltpu.VMEM((8, 128), jnp.float32), pltpu.VMEM((8, 128), jnp.float32), pltpu.SemaphoreType.DMA, ) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, in_specs=[pl.BlockSpec(memory_space=pl.ANY)], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x) def test_hbm_smem_dma(self): def kernel(x_hbm_ref, y_ref): def body(x_ref, sem): pltpu.async_copy(x_hbm_ref, x_ref, sem).wait() y_ref[...] 
= x_ref[0, 0] * jnp.ones_like(y_ref) pl.run_scoped( body, pltpu.SMEM((8, 128), jnp.float32), pltpu.SemaphoreType.DMA ) x = 4 * jnp.ones((8, 128), jnp.float32) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x) def test_smem_hbm_dma(self): def kernel(x_ref, y_hbm_ref): def body(y_ref, sem): y_ref[0, 0] = 0.0 y_ref[0, 1] = x_ref[4, 4] pltpu.async_copy(y_ref, y_hbm_ref, sem).wait() pl.run_scoped( body, pltpu.SMEM((1, 2), jnp.float32), pltpu.SemaphoreType.DMA ) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pltpu.SMEM), ], out_specs=pl.BlockSpec(memory_space=pl.ANY), out_shape=jax.ShapeDtypeStruct((1, 2), jnp.float32), )(x) expected = jnp.zeros_like(x[0:1, 0:2]).at[0, 1].set(x[4, 4]) np.testing.assert_allclose(y, expected) def test_vmem_vmem_dma(self): def kernel(x_ref, y_ref): def body(sem): pltpu.async_copy(x_ref, y_ref, sem).wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pltpu.VMEM), ], out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x) def test_hbm_vmem_dma_slicing(self): def kernel(x_hbm_ref, y_ref): def body(sem): dma1 = pltpu.async_copy( x_hbm_ref.at[pl.ds(0, 8)], y_ref.at[pl.ds(0, 8)], sem ) dma2 = pltpu.async_copy( x_hbm_ref.at[pl.ds(8, 8)], y_ref.at[pl.ds(8, 8)], sem ) dma1.wait() dma2.wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(2 * 8 * 128.).reshape((16, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), out_shape=jax.ShapeDtypeStruct((16, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x) def test_hbm_vmem_dma_indexing(self): def kernel(x_hbm_ref, y_ref): def body(sem): dma1 = pltpu.async_copy( x_hbm_ref.at[0], y_ref.at[pl.ds(0, 8)], sem ) dma2 = pltpu.async_copy( x_hbm_ref.at[1], y_ref.at[pl.ds(8, 8)], sem ) dma1.wait() dma2.wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(2 * 8 * 128.).reshape((2, 8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), out_shape=jax.ShapeDtypeStruct((16, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x.reshape((16, 128))) def test_hbm_vmem_dma_multiple_indexing(self): if self.INTERPRET: self.skipTest('Multiple indexing not supported in interpret mode.') def kernel(x_hbm_ref, y_ref): def body(sem): for i in range(3): dma1 = pltpu.async_copy( x_hbm_ref.at[pl.ds(i, 1)].at[0, 0], y_ref.at[i].at[pl.ds(0, 8)], sem ) dma2 = pltpu.async_copy( x_hbm_ref.at[pl.ds(i, 1)].at[0, 1], y_ref.at[i].at[pl.ds(8, 8)], sem ) dma1.wait() dma2.wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(3 * 2 * 8 * 128.).reshape((3, 2, 8, 128)) y = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), out_shape=jax.ShapeDtypeStruct((3, 16, 128), jnp.float32), )(x) np.testing.assert_allclose(y, x.reshape((3, 16, 128))) def test_cannot_squeeze_lane_sublane(self): if self.INTERPRET: self.skipTest('Only works on Mosaic TPU.') def kernel(x_hbm_ref, y_ref): def body(sem): dma1 = pltpu.async_copy( x_hbm_ref.at[:, :, 0], y_ref.at[pl.ds(0, 8)], sem ) dma2 = pltpu.async_copy( x_hbm_ref.at[:, :, 1], 
y_ref.at[pl.ds(8, 8)], sem ) dma1.wait() dma2.wait() pl.run_scoped(body, pltpu.SemaphoreType.DMA) x = jnp.arange(2 * 8 * 128.).reshape((2, 8, 128)) with self.assertRaises(Exception): _ = self.pallas_call( kernel, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), out_shape=jax.ShapeDtypeStruct((16, 128), jnp.float32), )(x) def test_hoisted_scratch_space(self): def kernel(x_ref, y_ref, scratch_ref): i = pl.program_id(0) @pl.when(i == 0) def _(): scratch_ref[...] = x_ref[...] scratch_ref[...] += jnp.ones_like(scratch_ref) @pl.when(i == 2) def _(): y_ref[...] = scratch_ref[...] x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, in_specs=[ pl.BlockSpec((8, 128), lambda i: (0, 0)), ], scratch_shapes=[pltpu.VMEM((8, 128), jnp.float32)], out_specs=pl.BlockSpec((8, 128), lambda i: (0, 0)), grid=(3,), ), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_array_equal(y, x + 3) def test_hoisted_smem_space(self): # TODO(sharadmv,apaszke): enable SMEM scratch spaces # TODO(sharadmv,apaszke): add support for ()-shaped SMEM refs self.skipTest('Currently doesn\'t work') def kernel(y_ref, scratch_ref): scratch_ref[0, 0] = pl.program_id(0) y_ref[...] = jnp.broadcast_to(scratch_ref[0, 0], y_ref.shape) y = pl.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, in_specs=[], scratch_shapes=[pltpu.SMEM((1, 1), jnp.int32)], out_specs=pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), grid=(2,), ), out_shape=jax.ShapeDtypeStruct((2, 8, 128), jnp.int32), )() expected = jnp.broadcast_to(jnp.arange(2, dtype=jnp.int32)[..., None, None], (2, 8, 128)) np.testing.assert_array_equal(y, expected) def test_hoisted_semaphore(self): def kernel(x_bbm_ref, y_ref, sem, dma_sem): pltpu.semaphore_signal(sem) pltpu.semaphore_wait(sem) pltpu.async_copy(x_bbm_ref, y_ref, dma_sem).wait() x = jnp.arange(8 * 128.).reshape((8, 128)) y = self.pallas_call( kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=0, in_specs=[ pl.BlockSpec(memory_space=pl.ANY), ], scratch_shapes=[pltpu.SemaphoreType.REGULAR, pltpu.SemaphoreType.DMA], out_specs=pl.BlockSpec(memory_space=pltpu.VMEM), ), out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32), )(x) np.testing.assert_array_equal(y, x) @jtu.thread_unsafe_test() # Uses a lot of TPU memory. def test_large_array_indexing(self): n = 6 dtype = jnp.bfloat16 # This test sometimes OOMs on smaller chips. We garbage collect # to increase the chance there is 6GB memory available. 
gc.collect() x = jax.lax.broadcasted_iota(dtype, (n, 1024 * 1024, 512), 0) def kernel(index, x, y, sem): pltpu.async_copy(x.at[index[0]], y.at[:], sem).wait() run = self.pallas_call(kernel, grid_spec=pltpu.PrefetchScalarGridSpec( num_scalar_prefetch=1, in_specs=[ pl.BlockSpec( memory_space=pl.ANY)], out_specs=pl.BlockSpec( memory_space=pl.ANY), scratch_shapes=[pltpu.SemaphoreType.DMA], ), out_shape=jax.ShapeDtypeStruct(x.shape[1:], dtype), ) for i in range(x.shape[0]): y = run(jnp.array([i], dtype=jnp.int32), x) np.testing.assert_array_equal(y, i) del y def test_dynamic_dma_on_2nd_minor(self): def kernel(array, data, index, size, _, sem): pltpu.async_copy( data.at[pl.ds(0, size[0])], array.at[pl.ds(index[0], size[0])], sem ).wait() def run(array, data, index, size): return pl.pallas_call( kernel, out_shape=array, in_specs=[ pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=pltpu.VMEM), pl.BlockSpec(memory_space=pltpu.SMEM), pl.BlockSpec(memory_space=pltpu.SMEM), ], scratch_shapes=[ pltpu.SemaphoreType.DMA, ], out_specs=pl.BlockSpec(memory_space=pltpu.ANY), input_output_aliases={0: 0}, )(array, data, index, size) array = jnp.zeros((1024, 128), jnp.int32) data = jnp.ones((8, 128), jnp.int32) index = jnp.array([3], jnp.int32) size = jnp.array([5], jnp.int32) expected = array.at[index[0] : index[0] + size[0]].set( data[index[0] : index[0] + size[0]] ) result = run(array, data, index, size) np.testing.assert_array_equal(result, expected) def test_unused_dma_descriptor_error(self): x = jnp.arange(8 * 128.0).reshape((8, 128)) @functools.partial( pl.pallas_call, out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype), in_specs=[pl.BlockSpec(memory_space=pltpu.HBM)], scratch_shapes=[pltpu.SemaphoreType.DMA], out_specs=pl.BlockSpec(memory_space=pltpu.HBM), ) def kernel(x_hbm_ref, o_hbm_ref, sem): pltpu.make_async_copy(x_hbm_ref, o_hbm_ref, sem) with self.assertLogs(level='ERROR') as log: kernel(x) [message] = log.output self.assertIn('AsyncCopyDescriptor was not used', message)
PallasCallDMATest
python
pytorch__pytorch
torch/distributed/fsdp/_fully_shard/_fsdp_state.py
{ "start": 2402, "end": 18049 }
class ____(_State): def __init__(self) -> None: super().__init__() self._fsdp_param_group: Optional[FSDPParamGroup] = None self._is_root: Optional[bool] = None # root set during lazy init self._state_ctx = FSDPStateContext() self._comm_ctx = FSDPCommContext() self._training_state: TrainingState = TrainingState.IDLE self._states_to_forward_prefetch: list[FSDPState] = [] self._states_to_backward_prefetch: list[FSDPState] = [] self._modules_to_run_forward: set[nn.Module] = set() # ``False`` when user set reshard_after_forward # through ``fully_shard`` or ``set_reshard_after_forward`` self._auto_reshard_after_forward: Optional[bool] = True # Define a separate init since `__init__` is called in the contract def init( self, modules: tuple[nn.Module, ...], device: torch.device, mp_policy: MixedPrecisionPolicy, auto_reshard_after_forward: bool, ) -> None: for module in modules: _insert_module_state(module, self) self._modules = modules # pyrefly: ignore [read-only] self._device = device self._device_handle = _get_device_handle(device.type) self._mp_policy = mp_policy self._auto_reshard_after_forward = auto_reshard_after_forward if len(modules) == 1: self._pre_forward_hook_handle = modules[0].register_forward_pre_hook( self._pre_forward, prepend=True, with_kwargs=True ) self._post_forward_hook_handle = modules[0].register_forward_hook( self._post_forward, prepend=False ) else: hook_handle = _register_group_forward_hooks( modules, self._pre_forward, self._post_forward, self._modules_to_run_forward, ) self._pre_forward_hook_handle = hook_handle self._post_forward_hook_handle = hook_handle def _root_pre_forward( self, module: nn.Module, args: tuple[Any, ...], kwargs: dict[str, Any] ) -> tuple[tuple[Any, ...], dict[str, Any]]: self._lazy_init() if self._state_ctx.iter_forward_root is not None: return args, kwargs if not compiled_autograd_enabled(): logger.debug("FSDP::root_pre_forward") self._state_ctx.iter_forward_root = self with torch.profiler.record_function("FSDP::root_pre_forward"): # Wait for optimizer before implicitly prefetched all-gathers if (event := self._state_ctx.post_optim_event) is not None: self._comm_ctx.all_gather_copy_in_stream.wait_event(event) self._comm_ctx.all_gather_stream.wait_event(event) self._state_ctx.post_optim_event = None else: current_stream = self._device_handle.current_stream() self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream) self._comm_ctx.all_gather_stream.wait_stream(current_stream) if self._device.type in [ "cuda", "hpu", "xpu", "mtia", torch._C._get_privateuse1_backend_name(), ]: with torch.profiler.record_function("FSDP::inputs_to_device"): args_tuple, kwargs_tuple = _to_kwargs( args, kwargs, self._device, False ) # same as DDP args, kwargs = args_tuple[0], kwargs_tuple[0] return args, kwargs def _lazy_init(self) -> None: """ Lazy initialization represents when all modules' parallelisms have finalized (e.g. FSDP has been applied to all desired modules). This means that we can determine which state is the root, and we do so by the 1st state to run forward. 
""" if self._is_root is not None: return # no-op: already initialized self._is_root = True if len(self._modules) > 1: raise RuntimeError( f"FSDP requires a single root module but got {self._modules}" ) detect_compiled_autograd() root_module = self._modules[0] visited_states: set[FSDPState] = set() for module_name, module in root_module.named_modules(): if (state := _get_module_fsdp_state(module)) is None: continue if module is not root_module: if state not in visited_states and state._is_root is not None: raise RuntimeError( "FSDP state has already been lazily initialized for " f"{module_name}\nFSDP requires running forward through " "the root module first" ) state._is_root = False self._state_ctx.all_states.append(state) visited_states.add(state) if self._fsdp_param_group and self._auto_reshard_after_forward: # For the root, do not reshard after forward since for training, # the parameters would be freed and all-gathered immediately self._fsdp_param_group.post_forward_mesh_info = None self._init_fqns() self._init_shared_state() # Run parameter group lazy inits after initializing FQNs for improved # error messages for state in self._state_ctx.all_states: if state._fsdp_param_group: state._fsdp_param_group.lazy_init() def _init_shared_state(self) -> None: self._comm_ctx.lazy_init(self._device) for state in self._state_ctx.all_states: state._state_ctx = self._state_ctx state._comm_ctx = self._comm_ctx if fsdp_param_group := state._fsdp_param_group: fsdp_param_group.comm_ctx = self._comm_ctx def _init_fqns(self) -> None: """Sets module and parameter FQN attributes for debugging.""" if not self._is_root: raise AssertionError("Expected _is_root to be True") root_module = self._modules[0] param_to_fsdp_param: dict[nn.Parameter, FSDPParam] = {} module_to_fsdp_param_group: dict[nn.Module, FSDPParamGroup] = {} for state in self._state_ctx.all_states: if fsdp_param_group := state._fsdp_param_group: for fsdp_param in fsdp_param_group.fsdp_params: param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param for module in fsdp_param_group.modules: module_to_fsdp_param_group[module] = fsdp_param_group for param_name, param in root_module.named_parameters(): if param in param_to_fsdp_param: param_to_fsdp_param[param]._param_fqn = param_name for module_name, module in root_module.named_modules(): if module in module_to_fsdp_param_group: module_fqn = module_to_fsdp_param_group[module]._module_fqn if module_fqn is None: module_to_fsdp_param_group[module]._module_fqn = module_name else: if not isinstance(module_fqn, str): raise AssertionError( f"Expected module_fqn to be str, got {type(module_fqn)}: {module_fqn}" ) module_fqn += f", {module_name}" module_to_fsdp_param_group[module]._module_fqn = module_fqn @disable_if_config_true def _pre_forward( self, module: nn.Module, args: tuple[Any, ...], kwargs: dict[str, Any] ) -> tuple[tuple[Any, ...], dict[str, Any]]: # When composing with module-hook-based activation checkpointing, the # pre-backward hook is responsible for the unshard if self._training_state == TrainingState.PRE_BACKWARD: return args, kwargs self._training_state = TrainingState.FORWARD args, kwargs = self._root_pre_forward(module, args, kwargs) if self._mp_policy.cast_forward_inputs and self._mp_policy.param_dtype: with torch.profiler.record_function("FSDP::cast_forward_inputs"): cast_fn = functools.partial( _cast_fp_tensor, self._mp_policy.param_dtype ) args, kwargs = ( _apply_to_tensors(cast_fn, args), _apply_to_tensors(cast_fn, kwargs), ) if self._fsdp_param_group: args, kwargs = 
self._fsdp_param_group.pre_forward(module, args, kwargs) for fsdp_state in self._states_to_forward_prefetch: if (target_param_group := fsdp_state._fsdp_param_group) is not None: FSDPParamGroup._prefetch_unshard(target_param_group, "forward") return args, kwargs @disable_if_config_true def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any: # When composing with module-hook-based activation checkpointing, the # post-backward hook is responsible for the reshard if self._training_state == TrainingState.PRE_BACKWARD: return output if self._fsdp_param_group: output = self._fsdp_param_group.post_forward(module, input, output) output = self._register_pre_backward_hook(output) self._training_state = TrainingState.IDLE if self._state_ctx.iter_forward_root is self: if all_gather_state := self._comm_ctx.all_gather_state: # Free the last all-gather result if needed; refer to # [Note: Overlapping all-gather copy-in and all-gather] self._comm_ctx.all_gather_copy_in_stream.wait_event( all_gather_state.event ) self._comm_ctx.all_gather_stream.wait_event(all_gather_state.event) self._comm_ctx.all_gather_state = None # free the all-gather result self._state_ctx.iter_forward_root = None if self._mp_policy.output_dtype is not None: with torch.profiler.record_function("FSDP::cast_forward_outputs"): output = _apply_to_tensors( functools.partial(_cast_fp_tensor, self._mp_policy.output_dtype), output, ) return output def _pre_backward(self, grad: torch.Tensor) -> torch.Tensor: self._training_state = TrainingState.PRE_BACKWARD self._register_root_post_backward_final_callback() if self._fsdp_param_group: default_prefetch = len(self._states_to_backward_prefetch) == 0 self._fsdp_param_group.pre_backward(default_prefetch) for fsdp_state in self._states_to_backward_prefetch: if (target_param_group := fsdp_state._fsdp_param_group) is not None: FSDPParamGroup._prefetch_unshard(target_param_group, "backward") return grad def _root_post_backward_final_callback(self) -> None: if not compiled_autograd_enabled(): logger.debug("FSDP::root_post_backward") with torch.profiler.record_function("FSDP::root_post_backward_callback"): for state in self._state_ctx.all_states: fsdp_param_group = state._fsdp_param_group if ( fsdp_param_group and fsdp_param_group._training_state != TrainingState.POST_BACKWARD ): # Run post-backward in case forward inputs did not require # gradient so the autograd backward did not run fsdp_param_group.post_backward() state._training_state = TrainingState.IDLE if fsdp_param_group: fsdp_param_group._training_state = TrainingState.IDLE if self._state_ctx.is_last_backward: state._finalize_backward() if self._state_ctx.is_last_backward: self._comm_ctx.post_forward_order.clear() if self._comm_ctx.reduce_scatter_state is not None: self._device_handle.current_stream().wait_event( self._comm_ctx.reduce_scatter_state.event ) self._comm_ctx.reduce_scatter_state = None self._state_ctx.post_backward_final_callback_queued = False def _finalize_backward(self) -> None: if self._modules_to_run_forward: msg = ( f"{len(self._modules_to_run_forward)} of the {len(self._modules)} " f"modules passed to fully_shard did not run forward before backward, " "which is error-prone since FSDP post-forward/pre-backward logic " "will not run for these modules. We recommend passing only modules " "that run forward together. 
Modules that did not run forward: " f"{list(self._modules_to_run_forward)}" ) warning_once(logger, msg, stacklevel=2) # Clear since we want the next forward to run self._modules_to_run_forward.clear() if self._fsdp_param_group: self._fsdp_param_group.finalize_backward() def _register_pre_backward_hook(self, output: Any) -> Any: if not torch.is_grad_enabled(): return output flat_outputs, _ = tree_flatten(output) for t in flat_outputs: if torch.is_tensor(t) and t.requires_grad: t.register_hook(self._pre_backward) return output def _register_root_post_backward_final_callback(self): if self._state_ctx.post_backward_final_callback_queued: return self._state_ctx.post_backward_final_callback_queued = True Variable._execution_engine.queue_callback( self._root_post_backward_final_callback ) def _get_module_fsdp_state(module: nn.Module) -> Optional[FSDPState]: state = _get_module_state(module) if isinstance(state, FSDPState): return state return None def _register_group_forward_hooks( modules: Sequence[nn.Module], pre_hook: Callable, post_hook: Callable, modules_to_run: set[nn.Module], ): """ Registers group forward pre and post-hooks. The pre-hook runs upon the first module pre-forward, and the post-hook runs upon the last. If at least one module does not run forward, then the post-hook does not run. """ modules_set = set(modules) @disable_if_config_true @functools.wraps(pre_hook) def wrapped_pre_hook(*args: Any, **kwargs: Any): if len(modules_to_run) == 0: # first to run modules_to_run.update(modules_set) return pre_hook(*args, **kwargs) @disable_if_config_true def get_wrapped_post_hook(module: nn.Module): @functools.wraps(post_hook) def wrapped_post_hook(*args: Any, **kwargs: Any): modules_to_run.discard(module) if len(modules_to_run) == 0: return post_hook(*args, **kwargs) return wrapped_post_hook pre_handles = [ module.register_forward_pre_hook( wrapped_pre_hook, prepend=True, with_kwargs=True ) for module in modules ] post_handles = [ module.register_forward_hook( get_wrapped_post_hook(module), prepend=False, always_call=True ) for module in modules ] return _MultiHandle(tuple(pre_handles + post_handles))
FSDPState
python
spack__spack
lib/spack/spack/vendor/pyrsistent/_pdeque.py
{ "start": 163, "end": 12216 }
class ____(object): """ Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented using two persistent lists. A maximum length can be specified to create a bounded queue. Fully supports the Sequence and Hashable protocols including indexing and slicing but if you need fast random access go for the PVector instead. Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to create an instance. Some examples: >>> x = pdeque([1, 2, 3]) >>> x.left 1 >>> x.right 3 >>> x[0] == x.left True >>> x[-1] == x.right True >>> x.pop() pdeque([1, 2]) >>> x.pop() == x[:-1] True >>> x.popleft() pdeque([2, 3]) >>> x.append(4) pdeque([1, 2, 3, 4]) >>> x.appendleft(4) pdeque([4, 1, 2, 3]) >>> y = pdeque([1, 2, 3], maxlen=3) >>> y.append(4) pdeque([2, 3, 4], maxlen=3) >>> y.appendleft(4) pdeque([4, 1, 2], maxlen=3) """ __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__') def __new__(cls, left_list, right_list, length, maxlen=None): instance = super(PDeque, cls).__new__(cls) instance._left_list = left_list instance._right_list = right_list instance._length = length if maxlen is not None: if not isinstance(maxlen, Integral): raise TypeError('An integer is required as maxlen') if maxlen < 0: raise ValueError("maxlen must be non-negative") instance._maxlen = maxlen return instance @property def right(self): """ Rightmost element in dqueue. """ return PDeque._tip_from_lists(self._right_list, self._left_list) @property def left(self): """ Leftmost element in dqueue. """ return PDeque._tip_from_lists(self._left_list, self._right_list) @staticmethod def _tip_from_lists(primary_list, secondary_list): if primary_list: return primary_list.first if secondary_list: return secondary_list[-1] raise IndexError('No elements in empty deque') def __iter__(self): return chain(self._left_list, self._right_list.reverse()) def __repr__(self): return "pdeque({0}{1})".format(list(self), ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '') __str__ = __repr__ @property def maxlen(self): """ Maximum length of the queue. """ return self._maxlen def pop(self, count=1): """ Return new deque with rightmost element removed. Popping the empty queue will return the empty queue. A optional count can be given to indicate the number of elements to pop. Popping with a negative index is the same as popleft. Executes in amortized O(k) where k is the number of elements to pop. >>> pdeque([1, 2]).pop() pdeque([1]) >>> pdeque([1, 2]).pop(2) pdeque([]) >>> pdeque([1, 2]).pop(-1) pdeque([2]) """ if count < 0: return self.popleft(-count) new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count) return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) def popleft(self, count=1): """ Return new deque with leftmost element removed. Otherwise functionally equivalent to pop(). 
>>> pdeque([1, 2]).popleft() pdeque([2]) """ if count < 0: return self.pop(-count) new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count) return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) @staticmethod def _pop_lists(primary_list, secondary_list, count): new_primary_list = primary_list new_secondary_list = secondary_list while count > 0 and (new_primary_list or new_secondary_list): count -= 1 if new_primary_list.rest: new_primary_list = new_primary_list.rest elif new_primary_list: new_primary_list = new_secondary_list.reverse() new_secondary_list = plist() else: new_primary_list = new_secondary_list.reverse().rest new_secondary_list = plist() return new_primary_list, new_secondary_list def _is_empty(self): return not self._left_list and not self._right_list def __lt__(self, other): if not isinstance(other, PDeque): return NotImplemented return tuple(self) < tuple(other) def __eq__(self, other): if not isinstance(other, PDeque): return NotImplemented if tuple(self) == tuple(other): # Sanity check of the length value since it is redundant (there for performance) assert len(self) == len(other) return True return False def __hash__(self): return hash(tuple(self)) def __len__(self): return self._length def append(self, elem): """ Return new deque with elem as the rightmost element. >>> pdeque([1, 2]).append(3) pdeque([1, 2, 3]) """ new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem) return PDeque(new_left_list, new_right_list, new_length, self._maxlen) def appendleft(self, elem): """ Return new deque with elem as the leftmost element. >>> pdeque([1, 2]).appendleft(3) pdeque([3, 1, 2]) """ new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem) return PDeque(new_left_list, new_right_list, new_length, self._maxlen) def _append(self, primary_list, secondary_list, elem): if self._maxlen is not None and self._length == self._maxlen: if self._maxlen == 0: return primary_list, secondary_list, 0 new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1) return new_primary_list, new_secondary_list.cons(elem), self._length return primary_list, secondary_list.cons(elem), self._length + 1 @staticmethod def _extend_list(the_list, iterable): count = 0 for elem in iterable: the_list = the_list.cons(elem) count += 1 return the_list, count def _extend(self, primary_list, secondary_list, iterable): new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable) new_secondary_list = secondary_list current_len = self._length + extend_count if self._maxlen is not None and current_len > self._maxlen: pop_len = current_len - self._maxlen new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len) extend_count -= pop_len return new_primary_list, new_secondary_list, extend_count def extend(self, iterable): """ Return new deque with all elements of iterable appended to the right. >>> pdeque([1, 2]).extend([3, 4]) pdeque([1, 2, 3, 4]) """ new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable) return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen) def extendleft(self, iterable): """ Return new deque with all elements of iterable appended to the left. NB! The elements will be inserted in reverse order compared to the order in the iterable. 
>>> pdeque([1, 2]).extendleft([3, 4]) pdeque([4, 3, 1, 2]) """ new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable) return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen) def count(self, elem): """ Return the number of elements equal to elem present in the queue >>> pdeque([1, 2, 1]).count(1) 2 """ return self._left_list.count(elem) + self._right_list.count(elem) def remove(self, elem): """ Return new deque with first element from left equal to elem removed. If no such element is found a ValueError is raised. >>> pdeque([2, 1, 2]).remove(2) pdeque([1, 2]) """ try: return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1) except ValueError: # Value not found in left list, try the right list try: # This is severely inefficient with a double reverse, should perhaps implement a remove_last()? return PDeque(self._left_list, self._right_list.reverse().remove(elem).reverse(), self._length - 1) except ValueError as e: raise ValueError('{0} not found in PDeque'.format(elem)) from e def reverse(self): """ Return reversed deque. >>> pdeque([1, 2, 3]).reverse() pdeque([3, 2, 1]) Also supports the standard python reverse function. >>> reversed(pdeque([1, 2, 3])) pdeque([3, 2, 1]) """ return PDeque(self._right_list, self._left_list, self._length) __reversed__ = reverse def rotate(self, steps): """ Return deque with elements rotated steps steps. >>> x = pdeque([1, 2, 3]) >>> x.rotate(1) pdeque([3, 1, 2]) >>> x.rotate(-2) pdeque([3, 1, 2]) """ popped_deque = self.pop(steps) if steps >= 0: return popped_deque.extendleft(islice(self.reverse(), steps)) return popped_deque.extend(islice(self, -steps)) def __reduce__(self): # Pickling support return pdeque, (list(self), self._maxlen) def __getitem__(self, index): if isinstance(index, slice): if index.step is not None and index.step != 1: # Too difficult, no structural sharing possible return pdeque(tuple(self)[index], maxlen=self._maxlen) result = self if index.start is not None: result = result.popleft(index.start % self._length) if index.stop is not None: result = result.pop(self._length - (index.stop % self._length)) return result if not isinstance(index, Integral): raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) if index >= 0: return self.popleft(index).left shifted = len(self) + index if shifted < 0: raise IndexError( "pdeque index {0} out of range {1}".format(index, len(self)), ) return self.popleft(shifted).left index = Sequence.index Sequence.register(PDeque) Hashable.register(PDeque) def pdeque(iterable=(), maxlen=None): """ Return deque containing the elements of iterable. If maxlen is specified then len(iterable) - maxlen elements are discarded from the left to if len(iterable) > maxlen. >>> pdeque([1, 2, 3]) pdeque([1, 2, 3]) >>> pdeque([1, 2, 3, 4], maxlen=2) pdeque([3, 4], maxlen=2) """ t = tuple(iterable) if maxlen is not None: t = t[-maxlen:] length = len(t) pivot = int(length / 2) left = plist(t[:pivot]) right = plist(t[pivot:], reverse=True) return PDeque(left, right, length, maxlen) def dq(*elements): """ Return deque containing all arguments. >>> dq(1, 2, 3) pdeque([1, 2, 3]) """ return pdeque(elements)
PDeque
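The docstring above notes that PDeque is "implemented using two persistent lists": one list holds elements from the left end, the other holds them from the right end (reversed), and a side that runs dry is refilled by reversing the other. A small hedged sketch of that idea with plain Python lists (not the persistent plist the class actually uses):

# Two-list ("banker's") deque sketch: amortized O(1) pushes and pops.
def push_right(left, right, x):
    return left, [x] + right          # right list keeps the rightmost first

def pop_left(left, right):
    if not left:                      # refill by reversing the other side
        left, right = list(reversed(right)), []
    return left[0], left[1:], right

left, right = [], []
for x in (1, 2, 3):
    left, right = push_right(left, right, x)
head, left, right = pop_left(left, right)
print(head, left, right)  # 1 [2, 3] []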
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 24754, "end": 25020 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ( "AUTOMATED_KANBAN_V2", "AUTOMATED_REVIEWS_KANBAN", "BASIC_KANBAN", "BUG_TRIAGE", )
ProjectTemplate
python
huggingface__transformers
src/transformers/models/doge/modeling_doge.py
{ "start": 2499, "end": 3220 }
class ____(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ DogeRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
DogeRMSNorm
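As a quick numeric illustration of what the module above computes (a sketch assuming PyTorch): with unit weights, each hidden vector is rescaled so that its root-mean-square is approximately 1.

# Numeric check of the RMSNorm computation with weight = 1 and eps = 1e-6.
import torch

x = torch.randn(2, 8)
variance = x.float().pow(2).mean(-1, keepdim=True)
y = x.float() * torch.rsqrt(variance + 1e-6)
print(y.pow(2).mean(-1))  # close to tensor([1., 1.])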
python
wandb__wandb
wandb/errors/errors.py
{ "start": 368, "end": 604 }
class ____(Error): """Error communicating with W&B servers.""" def __init__(self, msg: str, exc: Exception | None = None) -> None: self.exc = exc self.message = msg super().__init__(self.message)
CommError
python
huggingface__transformers
tests/models/deepseek_vl/test_processing_deepseek_vl.py
{ "start": 840, "end": 2725 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = DeepseekVLProcessor @classmethod def _setup_tokenizer(cls): tokenizer_class = cls._get_component_class_from_processor("tokenizer") return tokenizer_class( vocab_file=SAMPLE_VOCAB, extra_special_tokens={ "pad_token": "<|end▁of▁sentence|>", "image_token": "<image_placeholder>", }, ) @staticmethod def prepare_processor_dict(): return { "chat_template": "{% set seps = ['\n\n', '<\uff5cend\u2581of\u2581sentence\uff5c>'] %}{% set i = 0 %}You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\n{% for message in messages %}{% if message['role']|lower == 'user' %}User: {% elif message['role']|lower == 'assistant' %}Assistant:{% if not (loop.last and not add_generation_prompt and message['content'][0]['type']=='text' and message['content'][0]['text']=='') %} {% endif %}{% else %}{{ message['role'].capitalize() }}: {% endif %}{% for content in message['content'] %}{% if content['type'] == 'image' %}<image_placeholder>{% elif content['type'] == 'text' %}{% set text = content['text'] %}{% if loop.first %}{% set text = text.lstrip() %}{% endif %}{% if loop.last %}{% set text = text.rstrip() %}{% endif %}{% if not loop.first and message['content'][loop.index0-1]['type'] == 'text' %}{{ ' ' + text }}{% else %}{{ text }}{% endif %}{% endif %}{% endfor %}{% if not loop.last or add_generation_prompt %}{% if message['role']|lower == 'user' %}{{ seps[0] }}{% else %}{{ seps[1] }}{% endif %}{% endif %}{% endfor %}{% if add_generation_prompt %}Assistant:{% endif %}", "num_image_tokens": 576, } # fmt: skip
DeepseekVLProcessorTest
python
pytorch__pytorch
torch/_inductor/utils.py
{ "start": 52804, "end": 53646 }
class ____(IndentedBuffer): def __init__(self) -> None: super().__init__() def __getattribute__(self, name: str) -> Any: if name == "__class__": # Allow access to the class attribute return object.__getattribute__(self, name) raise RuntimeError( f"Tried to call self.{name} on FakeIndentedBuffer. This buffer" "is currently used on TritonTemplateKernel to prevent actual" "writes to the body without explicitly specifying the body with" "`TritonTemplateKernel.set_subgraph_body(name)`" ) @contextlib.contextmanager def restore_stdout_stderr() -> Iterator[None]: initial_stdout, initial_stderr = sys.stdout, sys.stderr try: yield finally: sys.stdout, sys.stderr = initial_stdout, initial_stderr
FakeIndentedBuffer
python
encode__starlette
tests/middleware/test_base.py
{ "start": 6204, "end": 42823 }
class ____(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: ctxvar.set("set by middleware") resp = await call_next(request) assert ctxvar.get() == "set by endpoint" return resp # pragma: no cover @pytest.mark.parametrize( "middleware_cls", [ CustomMiddlewareWithoutBaseHTTPMiddleware, pytest.param( CustomMiddlewareUsingBaseHTTPMiddleware, marks=pytest.mark.xfail( reason=( "BaseHTTPMiddleware creates a TaskGroup which copies the context" "and erases any changes to it made within the TaskGroup" ), raises=AssertionError, ), ), ], ) def test_contextvars( test_client_factory: TestClientFactory, middleware_cls: _MiddlewareFactory[Any], ) -> None: # this has to be an async endpoint because Starlette calls run_in_threadpool # on sync endpoints which has it's own set of peculiarities w.r.t propagating # contextvars (it propagates them forwards but not backwards) async def homepage(request: Request) -> PlainTextResponse: assert ctxvar.get() == "set by middleware" ctxvar.set("set by endpoint") return PlainTextResponse("Homepage") app = Starlette(middleware=[Middleware(middleware_cls)], routes=[Route("/", homepage)]) client = test_client_factory(app) response = client.get("/") assert response.status_code == 200, response.content @pytest.mark.anyio async def test_run_background_tasks_even_if_client_disconnects() -> None: # test for https://github.com/Kludex/starlette/issues/1438 response_complete = anyio.Event() background_task_run = anyio.Event() async def sleep_and_set() -> None: # small delay to give BaseHTTPMiddleware a chance to cancel us # this is required to make the test fail prior to fixing the issue # so do not be surprised if you remove it and the test still passes await anyio.sleep(0.1) background_task_run.set() async def endpoint_with_background_task(_: Request) -> PlainTextResponse: return PlainTextResponse(background=BackgroundTask(sleep_and_set)) async def passthrough( request: Request, call_next: RequestResponseEndpoint, ) -> Response: return await call_next(request) app = Starlette( middleware=[Middleware(BaseHTTPMiddleware, dispatch=passthrough)], routes=[Route("/", endpoint_with_background_task)], ) scope = { "type": "http", "version": "3", "method": "GET", "path": "/", } async def receive() -> Message: raise NotImplementedError("Should not be called!") async def send(message: Message) -> None: if message["type"] == "http.response.body": if not message.get("more_body", False): # pragma: no branch response_complete.set() await app(scope, receive, send) assert background_task_run.is_set() def test_run_background_tasks_raise_exceptions(test_client_factory: TestClientFactory) -> None: # test for https://github.com/Kludex/starlette/issues/2625 async def sleep_and_set() -> None: await anyio.sleep(0.1) raise ValueError("TEST") async def endpoint_with_background_task(_: Request) -> PlainTextResponse: return PlainTextResponse(background=BackgroundTask(sleep_and_set)) async def passthrough(request: Request, call_next: RequestResponseEndpoint) -> Response: return await call_next(request) app = Starlette( middleware=[Middleware(BaseHTTPMiddleware, dispatch=passthrough)], routes=[Route("/", endpoint_with_background_task)], ) client = test_client_factory(app) with pytest.raises(ValueError, match="TEST"): client.get("/") def test_exception_can_be_caught(test_client_factory: TestClientFactory) -> None: async def error_endpoint(_: Request) -> None: raise ValueError("TEST") async def catches_error(request: Request, call_next: 
RequestResponseEndpoint) -> Response: try: return await call_next(request) except ValueError as exc: return PlainTextResponse(content=str(exc), status_code=400) app = Starlette( middleware=[Middleware(BaseHTTPMiddleware, dispatch=catches_error)], routes=[Route("/", error_endpoint)], ) client = test_client_factory(app) response = client.get("/") assert response.status_code == 400 assert response.text == "TEST" @pytest.mark.anyio async def test_do_not_block_on_background_tasks() -> None: response_complete = anyio.Event() events: list[str | Message] = [] async def sleep_and_set() -> None: events.append("Background task started") await anyio.sleep(0.1) events.append("Background task finished") async def endpoint_with_background_task(_: Request) -> PlainTextResponse: return PlainTextResponse(content="Hello", background=BackgroundTask(sleep_and_set)) async def passthrough(request: Request, call_next: RequestResponseEndpoint) -> Response: return await call_next(request) app = Starlette( middleware=[Middleware(BaseHTTPMiddleware, dispatch=passthrough)], routes=[Route("/", endpoint_with_background_task)], ) scope = { "type": "http", "version": "3", "method": "GET", "path": "/", } async def receive() -> Message: raise NotImplementedError("Should not be called!") async def send(message: Message) -> None: if message["type"] == "http.response.body": events.append(message) if not message.get("more_body", False): response_complete.set() async with anyio.create_task_group() as tg: tg.start_soon(app, scope, receive, send) tg.start_soon(app, scope, receive, send) # Without the fix, the background tasks would start and finish before the # last http.response.body is sent. assert events == [ {"body": b"Hello", "more_body": True, "type": "http.response.body"}, {"body": b"", "more_body": False, "type": "http.response.body"}, {"body": b"Hello", "more_body": True, "type": "http.response.body"}, {"body": b"", "more_body": False, "type": "http.response.body"}, "Background task started", "Background task started", "Background task finished", "Background task finished", ] @pytest.mark.anyio async def test_run_context_manager_exit_even_if_client_disconnects() -> None: # test for https://github.com/Kludex/starlette/issues/1678#issuecomment-1172916042 response_complete = anyio.Event() context_manager_exited = anyio.Event() async def sleep_and_set() -> None: # small delay to give BaseHTTPMiddleware a chance to cancel us # this is required to make the test fail prior to fixing the issue # so do not be surprised if you remove it and the test still passes await anyio.sleep(0.1) context_manager_exited.set() class ContextManagerMiddleware: def __init__(self, app: ASGIApp): self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: async with AsyncExitStack() as stack: stack.push_async_callback(sleep_and_set) await self.app(scope, receive, send) async def simple_endpoint(_: Request) -> PlainTextResponse: return PlainTextResponse(background=BackgroundTask(sleep_and_set)) async def passthrough( request: Request, call_next: RequestResponseEndpoint, ) -> Response: return await call_next(request) app = Starlette( middleware=[ Middleware(BaseHTTPMiddleware, dispatch=passthrough), Middleware(ContextManagerMiddleware), ], routes=[Route("/", simple_endpoint)], ) scope = { "type": "http", "version": "3", "method": "GET", "path": "/", } async def receive() -> Message: raise NotImplementedError("Should not be called!") async def send(message: Message) -> None: if message["type"] == "http.response.body": if 
not message.get("more_body", False): # pragma: no branch response_complete.set() await app(scope, receive, send) assert context_manager_exited.is_set() def test_app_receives_http_disconnect_while_sending_if_discarded( test_client_factory: TestClientFactory, ) -> None: class DiscardingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: Any, ) -> PlainTextResponse: # As a matter of ordering, this test targets the case where the downstream # app response is discarded while it is sending a response body. # We need to wait for the downstream app to begin sending a response body # before sending the middleware response that will overwrite the downstream # response. downstream_app_response = await call_next(request) body_generator = downstream_app_response.body_iterator try: await body_generator.__anext__() finally: await body_generator.aclose() return PlainTextResponse("Custom") async def downstream_app( scope: Scope, receive: Receive, send: Send, ) -> None: await send( { "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), ], } ) async with anyio.create_task_group() as task_group: async def cancel_on_disconnect( *, task_status: TaskStatus[None] = anyio.TASK_STATUS_IGNORED, ) -> None: task_status.started() while True: message = await receive() if message["type"] == "http.disconnect": # pragma: no branch task_group.cancel_scope.cancel() break # Using start instead of start_soon to ensure that # cancel_on_disconnect is scheduled by the event loop # before we start returning the body await task_group.start(cancel_on_disconnect) # A timeout is set for 0.1 second in order to ensure that # we never deadlock the test run in an infinite loop with anyio.move_on_after(0.1): while True: await send( { "type": "http.response.body", "body": b"chunk ", "more_body": True, } ) pytest.fail("http.disconnect should have been received and canceled the scope") # pragma: no cover app = DiscardingMiddleware(downstream_app) client = test_client_factory(app) response = client.get("/does_not_exist") assert response.text == "Custom" def test_app_receives_http_disconnect_after_sending_if_discarded( test_client_factory: TestClientFactory, ) -> None: class DiscardingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> PlainTextResponse: await call_next(request) return PlainTextResponse("Custom") async def downstream_app( scope: Scope, receive: Receive, send: Send, ) -> None: await send( { "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), ], } ) await send( { "type": "http.response.body", "body": b"first chunk, ", "more_body": True, } ) await send( { "type": "http.response.body", "body": b"second chunk", "more_body": True, } ) message = await receive() assert message["type"] == "http.disconnect" app = DiscardingMiddleware(downstream_app) client = test_client_factory(app) response = client.get("/does_not_exist") assert response.text == "Custom" def test_read_request_stream_in_app_after_middleware_calls_stream( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: expected = [b""] async for chunk in request.stream(): assert chunk == expected.pop(0) assert expected == [] return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: expected = [b"a", b""] async for chunk in 
request.stream(): assert chunk == expected.pop(0) assert expected == [] return await call_next(request) app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 def test_read_request_stream_in_app_after_middleware_calls_body( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: expected = [b"a", b""] async for chunk in request.stream(): assert chunk == expected.pop(0) assert expected == [] return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: assert await request.body() == b"a" return await call_next(request) app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 def test_read_request_body_in_app_after_middleware_calls_stream( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: assert await request.body() == b"" return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: expected = [b"a", b""] async for chunk in request.stream(): assert chunk == expected.pop(0) assert expected == [] return await call_next(request) app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 def test_read_request_body_in_app_after_middleware_calls_body( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: assert await request.body() == b"a" return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: assert await request.body() == b"a" return await call_next(request) app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 def test_read_request_stream_in_dispatch_after_app_calls_stream( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: expected = [b"a", b""] async for chunk in request.stream(): assert chunk == expected.pop(0) assert expected == [] return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: resp = await call_next(request) with pytest.raises(RuntimeError, match="Stream consumed"): async for _ in request.stream(): raise AssertionError("should not be called") # pragma: no cover return resp app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 def test_read_request_stream_in_dispatch_after_app_calls_body( 
test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: assert await request.body() == b"a" return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: resp = await call_next(request) with pytest.raises(RuntimeError, match="Stream consumed"): async for _ in request.stream(): raise AssertionError("should not be called") # pragma: no cover return resp app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 @pytest.mark.anyio async def test_read_request_stream_in_dispatch_wrapping_app_calls_body() -> None: async def endpoint(scope: Scope, receive: Receive, send: Send) -> None: request = Request(scope, receive) async for chunk in request.stream(): # pragma: no branch assert chunk == b"2" break await Response()(scope, receive, send) class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: expected = b"1" response: Response | None = None async for chunk in request.stream(): # pragma: no branch assert chunk == expected if expected == b"1": response = await call_next(request) expected = b"3" else: break assert response is not None return response async def rcv() -> AsyncGenerator[Message, None]: yield {"type": "http.request", "body": b"1", "more_body": True} yield {"type": "http.request", "body": b"2", "more_body": True} yield {"type": "http.request", "body": b"3"} raise AssertionError( # pragma: no cover "Should not be called, no need to poll for disconnect" ) sent: list[Message] = [] async def send(msg: Message) -> None: sent.append(msg) app: ASGIApp = endpoint app = ConsumingMiddleware(app) rcv_stream = rcv() await app({"type": "http"}, rcv_stream.__anext__, send) assert sent == [ { "type": "http.response.start", "status": 200, "headers": [(b"content-length", b"0")], }, {"type": "http.response.body", "body": b"", "more_body": False}, ] await rcv_stream.aclose() def test_read_request_stream_in_dispatch_after_app_calls_body_with_middleware_calling_body_before_call_next( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: assert await request.body() == b"a" return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: assert await request.body() == b"a" # this buffers the request body in memory resp = await call_next(request) async for chunk in request.stream(): if chunk: assert chunk == b"a" return resp app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 def test_read_request_body_in_dispatch_after_app_calls_body_with_middleware_calling_body_before_call_next( test_client_factory: TestClientFactory, ) -> None: async def homepage(request: Request) -> PlainTextResponse: assert await request.body() == b"a" return PlainTextResponse("Homepage") class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: assert await request.body() == b"a" 
# this buffers the request body in memory resp = await call_next(request) assert await request.body() == b"a" # no problem here return resp app = Starlette( routes=[Route("/", homepage, methods=["POST"])], middleware=[Middleware(ConsumingMiddleware)], ) client: TestClient = test_client_factory(app) response = client.post("/", content=b"a") assert response.status_code == 200 @pytest.mark.anyio async def test_read_request_disconnected_client() -> None: """If we receive a disconnect message when the downstream ASGI app calls receive() the Request instance passed into the dispatch function should get marked as disconnected. The downstream ASGI app should not get a ClientDisconnect raised, instead if should just receive the disconnect message. """ async def endpoint(scope: Scope, receive: Receive, send: Send) -> None: msg = await receive() assert msg["type"] == "http.disconnect" await Response()(scope, receive, send) class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: response = await call_next(request) disconnected = await request.is_disconnected() assert disconnected is True return response scope = {"type": "http", "method": "POST", "path": "/"} async def receive() -> AsyncGenerator[Message, None]: yield {"type": "http.disconnect"} raise AssertionError("Should not be called, would hang") # pragma: no cover async def send(msg: Message) -> None: if msg["type"] == "http.response.start": assert msg["status"] == 200 app: ASGIApp = ConsumingMiddleware(endpoint) rcv = receive() await app(scope, rcv.__anext__, send) await rcv.aclose() @pytest.mark.anyio async def test_read_request_disconnected_after_consuming_steam() -> None: async def endpoint(scope: Scope, receive: Receive, send: Send) -> None: msg = await receive() assert msg.pop("more_body", False) is False assert msg == {"type": "http.request", "body": b"hi"} msg = await receive() assert msg == {"type": "http.disconnect"} await Response()(scope, receive, send) class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: await request.body() disconnected = await request.is_disconnected() assert disconnected is True response = await call_next(request) return response scope = {"type": "http", "method": "POST", "path": "/"} async def receive() -> AsyncGenerator[Message, None]: yield {"type": "http.request", "body": b"hi"} yield {"type": "http.disconnect"} raise AssertionError("Should not be called, would hang") # pragma: no cover async def send(msg: Message) -> None: if msg["type"] == "http.response.start": assert msg["status"] == 200 app: ASGIApp = ConsumingMiddleware(endpoint) rcv = receive() await app(scope, rcv.__anext__, send) await rcv.aclose() def test_downstream_middleware_modifies_receive( test_client_factory: TestClientFactory, ) -> None: """If a downstream middleware modifies receive() the final ASGI app should see the modified version. 
""" async def endpoint(scope: Scope, receive: Receive, send: Send) -> None: request = Request(scope, receive) body = await request.body() assert body == b"foo foo " await Response()(scope, receive, send) class ConsumingMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: body = await request.body() assert body == b"foo " return await call_next(request) def modifying_middleware(app: ASGIApp) -> ASGIApp: async def wrapped_app(scope: Scope, receive: Receive, send: Send) -> None: async def wrapped_receive() -> Message: msg = await receive() if msg["type"] == "http.request": # pragma: no branch msg["body"] = msg["body"] * 2 return msg await app(scope, wrapped_receive, send) return wrapped_app client = test_client_factory(ConsumingMiddleware(modifying_middleware(endpoint))) resp = client.post("/", content=b"foo ") assert resp.status_code == 200 def test_pr_1519_comment_1236166180_example() -> None: """ https://github.com/Kludex/starlette/pull/1519#issuecomment-1236166180 """ bodies: list[bytes] = [] class LogRequestBodySize(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: print(len(await request.body())) return await call_next(request) def replace_body_middleware(app: ASGIApp) -> ASGIApp: async def wrapped_app(scope: Scope, receive: Receive, send: Send) -> None: async def wrapped_rcv() -> Message: msg = await receive() msg["body"] += b"-foo" return msg await app(scope, wrapped_rcv, send) return wrapped_app async def endpoint(request: Request) -> Response: body = await request.body() bodies.append(body) return Response() app: ASGIApp = Starlette(routes=[Route("/", endpoint, methods=["POST"])]) app = replace_body_middleware(app) app = LogRequestBodySize(app) client = TestClient(app) resp = client.post("/", content=b"Hello, World!") resp.raise_for_status() assert bodies == [b"Hello, World!-foo"] @pytest.mark.anyio async def test_multiple_middlewares_stacked_client_disconnected() -> None: """ Tests for: - https://github.com/Kludex/starlette/issues/2516 - https://github.com/Kludex/starlette/pull/2687 """ ordered_events: list[str] = [] unordered_events: list[str] = [] class MyMiddleware(BaseHTTPMiddleware): def __init__(self, app: ASGIApp, version: int) -> None: self.version = version super().__init__(app) async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: ordered_events.append(f"{self.version}:STARTED") res = await call_next(request) ordered_events.append(f"{self.version}:COMPLETED") def background() -> None: unordered_events.append(f"{self.version}:BACKGROUND") assert res.background is None res.background = BackgroundTask(background) return res async def sleepy(request: Request) -> Response: try: await request.body() except ClientDisconnect: pass else: # pragma: no cover raise AssertionError("Should have raised ClientDisconnect") return Response(b"") app = Starlette( routes=[Route("/", sleepy)], middleware=[Middleware(MyMiddleware, version=_ + 1) for _ in range(10)], ) scope = { "type": "http", "version": "3", "method": "GET", "path": "/", } async def receive() -> AsyncIterator[Message]: yield {"type": "http.disconnect"} sent: list[Message] = [] async def send(message: Message) -> None: sent.append(message) await app(scope, receive().__anext__, send) assert ordered_events == [ "1:STARTED", "2:STARTED", "3:STARTED", "4:STARTED", "5:STARTED", "6:STARTED", "7:STARTED", "8:STARTED", "9:STARTED", "10:STARTED", "10:COMPLETED", 
"9:COMPLETED", "8:COMPLETED", "7:COMPLETED", "6:COMPLETED", "5:COMPLETED", "4:COMPLETED", "3:COMPLETED", "2:COMPLETED", "1:COMPLETED", ] assert sorted(unordered_events) == sorted( [ "1:BACKGROUND", "2:BACKGROUND", "3:BACKGROUND", "4:BACKGROUND", "5:BACKGROUND", "6:BACKGROUND", "7:BACKGROUND", "8:BACKGROUND", "9:BACKGROUND", "10:BACKGROUND", ] ) assert sent == [ { "type": "http.response.start", "status": 200, "headers": [(b"content-length", b"0")], }, {"type": "http.response.body", "body": b"", "more_body": False}, ] @pytest.mark.anyio @pytest.mark.parametrize("send_body", [True, False]) async def test_poll_for_disconnect_repeated(send_body: bool) -> None: async def app_poll_disconnect(scope: Scope, receive: Receive, send: Send) -> None: for _ in range(2): msg = await receive() while msg["type"] == "http.request": msg = await receive() assert msg["type"] == "http.disconnect" await Response(b"good!")(scope, receive, send) class MyMiddleware(BaseHTTPMiddleware): async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: return await call_next(request) app = MyMiddleware(app_poll_disconnect) scope = { "type": "http", "version": "3", "method": "GET", "path": "/", } async def receive() -> AsyncIterator[Message]: # the key here is that we only ever send 1 htt.disconnect message if send_body: yield {"type": "http.request", "body": b"hello", "more_body": True} yield {"type": "http.request", "body": b"", "more_body": False} yield {"type": "http.disconnect"} raise AssertionError("Should not be called, would hang") # pragma: no cover sent: list[Message] = [] async def send(message: Message) -> None: sent.append(message) await app(scope, receive().__anext__, send) assert sent == [ { "type": "http.response.start", "status": 200, "headers": [(b"content-length", b"5")], }, {"type": "http.response.body", "body": b"good!", "more_body": True}, {"type": "http.response.body", "body": b"", "more_body": False}, ] @pytest.mark.anyio async def test_asgi_pathsend_events(tmpdir: Path) -> None: path = tmpdir / "example.txt" with path.open("w") as file: file.write("<file content>") response_complete = anyio.Event() events: list[Message] = [] async def endpoint_with_pathsend(_: Request) -> FileResponse: return FileResponse(path) async def passthrough(request: Request, call_next: RequestResponseEndpoint) -> Response: return await call_next(request) app = Starlette( middleware=[Middleware(BaseHTTPMiddleware, dispatch=passthrough)], routes=[Route("/", endpoint_with_pathsend)], ) scope = { "type": "http", "version": "3", "method": "GET", "path": "/", "headers": [], "extensions": {"http.response.pathsend": {}}, } async def receive() -> Message: raise NotImplementedError("Should not be called!") # pragma: no cover async def send(message: Message) -> None: events.append(message) if message["type"] == "http.response.pathsend": response_complete.set() await app(scope, receive, send) assert len(events) == 2 assert events[0]["type"] == "http.response.start" assert events[1]["type"] == "http.response.pathsend" def test_error_context_propagation(test_client_factory: TestClientFactory) -> None: class PassthroughMiddleware(BaseHTTPMiddleware): async def dispatch( self, request: Request, call_next: RequestResponseEndpoint, ) -> Response: return await call_next(request) def exception_without_context(request: Request) -> None: raise Exception("Exception") def exception_with_context(request: Request) -> None: try: raise Exception("Inner exception") except Exception: raise Exception("Outer exception") def 
exception_with_cause(request: Request) -> None: try: raise Exception("Inner exception") except Exception as e: raise Exception("Outer exception") from e app = Starlette( routes=[ Route("/exception-without-context", endpoint=exception_without_context), Route("/exception-with-context", endpoint=exception_with_context), Route("/exception-with-cause", endpoint=exception_with_cause), ], middleware=[Middleware(PassthroughMiddleware)], ) client = test_client_factory(app) # For exceptions without context the context is filled with the `anyio.EndOfStream` # but it is suppressed therefore not propagated to traceback. with pytest.raises(Exception) as ctx: client.get("/exception-without-context") assert str(ctx.value) == "Exception" assert ctx.value.__cause__ is None assert ctx.value.__context__ is not None assert ctx.value.__suppress_context__ is True # For exceptions with context the context is propagated as a cause to avoid # `anyio.EndOfStream` error from overwriting it. with pytest.raises(Exception) as ctx: client.get("/exception-with-context") assert str(ctx.value) == "Outer exception" assert ctx.value.__cause__ is not None assert str(ctx.value.__cause__) == "Inner exception" # For exceptions with cause check that it gets correctly propagated. with pytest.raises(Exception) as ctx: client.get("/exception-with-cause") assert str(ctx.value) == "Outer exception" assert ctx.value.__cause__ is not None assert str(ctx.value.__cause__) == "Inner exception"
CustomMiddlewareUsingBaseHTTPMiddleware
python
tiangolo__fastapi
fastapi/responses.py
{ "start": 1216, "end": 1761 }
class ____(JSONResponse):
    """
    JSON response using the high-performance orjson library to serialize data to JSON.

    Read more about it in the
    [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/).
    """

    def render(self, content: Any) -> bytes:
        assert orjson is not None, "orjson must be installed to use ORJSONResponse"
        return orjson.dumps(
            content, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY
        )
ORJSONResponse
python
spack__spack
var/spack/test_repos/spack_repo/builder_test/packages/gmake/package.py
{ "start": 217, "end": 539 }
class ____(Package):
    """Dummy GMake Package"""

    homepage = "https://www.gnu.org/software/make"
    url = "https://ftpmirror.gnu.org/make/make-4.4.tar.gz"

    version("4.4", sha256="ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed")

    def do_stage(self):
        mkdirp(self.stage.source_path)
Gmake
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/tasks.py
{ "start": 37368, "end": 41210 }
class ____(GoogleCloudBaseOperator):
    """
    Lists the tasks in Cloud Tasks.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudTasksTasksListOperator`

    :param location: The location name in which the tasks were created.
    :param queue_name: The queue's name.
    :param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
        If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param response_view: (Optional) This field specifies which subset of the Task will be returned.
    :param page_size: (Optional) The maximum number of resources contained in the
        underlying API response.
    :param retry: (Optional) A retry object used to retry requests.
        If None is specified, requests will not be retried.
    :param timeout: (Optional) The amount of time, in seconds, to wait for the request
        to complete. Note that if retry is specified, the timeout applies to each
        individual attempt.
    :param metadata: (Optional) Additional metadata that is provided to the method.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "location",
        "queue_name",
        "project_id",
        "gcp_conn_id",
        "impersonation_chain",
    )
    operator_extra_links = (CloudTasksQueueLink(),)

    def __init__(
        self,
        *,
        location: str,
        queue_name: str,
        project_id: str = PROVIDE_PROJECT_ID,
        response_view: Task.View | None = None,
        page_size: int | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: MetaData = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.location = location
        self.queue_name = queue_name
        self.project_id = project_id
        self.response_view = response_view
        self.page_size = page_size
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = CloudTasksHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        tasks = hook.list_tasks(
            location=self.location,
            queue_name=self.queue_name,
            project_id=self.project_id,
            response_view=self.response_view,
            page_size=self.page_size,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        CloudTasksQueueLink.persist(
            context=context,
            queue_name=f"projects/{self.project_id or hook.project_id}/"
            f"locations/{self.location}/queues/{self.queue_name}",
        )
        return [Task.to_dict(t) for t in tasks]
CloudTasksTasksListOperator
python
apache__airflow
providers/yandex/src/airflow/providers/yandex/operators/dataproc.py
{ "start": 14862, "end": 15463 }
class ____(DataprocBaseOperator):
    """
    Deletes Yandex.Cloud Data Proc cluster.

    :param connection_id: ID of the Yandex.Cloud Airflow connection.
    :param cluster_id: ID of the cluster to remove. (templated)
    """

    def __init__(self, *, connection_id: str | None = None, cluster_id: str | None = None, **kwargs) -> None:
        super().__init__(yandex_conn_id=connection_id, cluster_id=cluster_id, **kwargs)

    def execute(self, context: Context) -> None:
        hook = self._setup(context)
        hook.dataproc_client.delete_cluster(self.cluster_id)
DataprocDeleteClusterOperator
python
getsentry__sentry
src/sentry/backup/services/import_export/model.py
{ "start": 3961, "end": 5254 }
class ____(RpcModel):
    """
    Shadows `sentry.backup.helpers.ImportFlags` for the purpose of passing it over an RPC
    boundary.
    """

    merge_users: bool = False
    overwrite_configs: bool = False
    import_uuid: str | None = None

    # TODO(azaslavsky): Remove `None` variant once rolled out, set default to `False` instead.
    hide_organizations: bool | None = None

    def from_rpc(self) -> ImportFlags:
        return ImportFlags(
            merge_users=self.merge_users,
            overwrite_configs=self.overwrite_configs,
            import_uuid=self.import_uuid,
            # TODO(azaslavsky): remove cast.
            hide_organizations=bool(self.hide_organizations),
        )

    @classmethod
    def into_rpc(cls, base_flags: ImportFlags) -> "RpcImportFlags":
        return cls(
            merge_users=base_flags.merge_users,
            overwrite_configs=base_flags.overwrite_configs,
            import_uuid=base_flags.import_uuid,
            # TODO(azaslavsky): remove cast.
            hide_organizations=(
                None if not base_flags.hide_organizations else base_flags.hide_organizations
            ),
        )


# Using strings, rather than `auto()` integers, makes this more (though not completely) robust to
# version skew.
@unique
RpcImportFlags
python
great-expectations__great_expectations
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_have_elevation.py
{ "start": 565, "end": 1894 }
class ____(ColumnMapMetricProvider):
    # This is the id string that will be used to reference your metric.
    # Please see {some doc} for information on how to choose an id string for your Metric.
    condition_metric_name = "column_values.elevated"
    condition_value_keys = ()

    # This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        column = column.apply(shape)
        # Set crs to meters
        geo_ser = geopandas.GeoSeries(column, crs={"proj": "cea"})
        # access the length of the column
        return ~geo_ser.z.isnull()

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     return column.in_([3])

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     return column.isin([3])


# This class defines the Expectation itself
# The main business logic for calculation lives here.
ColumnValuesHaveElevation
python
readthedocs__readthedocs.org
readthedocs/proxito/views/mixins.py
{ "start": 1122, "end": 1208 }
class ____(Exception):
    """An invalid path was passed to storage."""
InvalidPathError
python
bokeh__bokeh
src/bokeh/models/tiles.py
{ "start": 5360, "end": 5747 }
class ____(MercatorTileSource):
    ''' Has the same tile origin as the ``WMTSTileSource`` but requests tiles using
    a `quadkey` argument instead of X, Y, Z e.g. ``http://your.quadkey.tile.host/{Q}.png``

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
QUADKEYTileSource
python
milvus-io__pymilvus
tests/test_bulk_writer_stage.py
{ "start": 7994, "end": 14566 }
class ____: """Test StageFileManager class.""" @pytest.fixture def stage_file_manager(self) -> StageFileManager: """Create a StageFileManager instance.""" return StageFileManager( cloud_endpoint="https://api.cloud.zilliz.com", api_key="test_api_key", stage_name="test_stage", connect_type=ConnectType.AUTO, ) @pytest.fixture def mock_stage_info(self) -> Dict[str, Any]: """Mock stage information.""" return { "stageName": "test_stage", "stagePrefix": "prefix/", "endpoint": "s3.amazonaws.com", "bucketName": "test-bucket", "region": "us-west-2", "cloud": "aws", "condition": {"maxContentLength": 1073741824}, "credentials": { "tmpAK": "test_access_key", "tmpSK": "test_secret_key", "sessionToken": "test_token", "expireTime": "2099-12-31T23:59:59Z", }, } def test_convert_dir_path(self, stage_file_manager: StageFileManager) -> None: """Test directory path conversion.""" assert stage_file_manager._convert_dir_path("") == "" assert stage_file_manager._convert_dir_path("/") == "" assert stage_file_manager._convert_dir_path("data") == "data/" assert stage_file_manager._convert_dir_path("data/") == "data/" @patch("pymilvus.bulk_writer.stage_file_manager.apply_stage") @patch("pymilvus.bulk_writer.stage_file_manager.Minio") def test_refresh_stage_and_client( self, mock_minio: Mock, mock_apply: Mock, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any], ) -> None: """Test refreshing stage info and client.""" mock_response = Mock() mock_response.json.return_value = {"data": mock_stage_info} mock_apply.return_value = mock_response stage_file_manager._refresh_stage_and_client("data/") assert stage_file_manager.stage_info == mock_stage_info mock_apply.assert_called_once() mock_minio.assert_called_once() def test_validate_size_success( self, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any] ) -> None: """Test successful size validation.""" stage_file_manager.stage_info = mock_stage_info stage_file_manager.total_bytes = 1000000 # 1MB # Should not raise any exception stage_file_manager._validate_size() def test_validate_size_failure( self, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any] ) -> None: """Test size validation failure.""" stage_file_manager.stage_info = mock_stage_info stage_file_manager.total_bytes = 2147483648 # 2GB with pytest.raises(ValueError, match="exceeds the maximum contentLength limit"): stage_file_manager._validate_size() @patch("pymilvus.bulk_writer.stage_file_manager.FileUtils.process_local_path") @patch.object(StageFileManager, "_refresh_stage_and_client") @patch.object(StageFileManager, "_validate_size") @patch.object(StageFileManager, "_put_object") def test_upload_file_to_stage( self, mock_put_object: Mock, mock_validate: Mock, mock_refresh: Mock, mock_process: Mock, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any], ) -> None: """Test uploading file to stage.""" with tempfile.TemporaryDirectory() as temp_dir: # Create test files test_file = Path(temp_dir) / "test.txt" test_file.write_text("test content") mock_process.return_value = ([str(test_file)], 12) stage_file_manager.stage_info = mock_stage_info result = stage_file_manager.upload_file_to_stage(str(test_file), "data/") assert result["stageName"] == "test_stage" assert result["path"] == "data/" mock_refresh.assert_called_once_with("data/") mock_validate.assert_called_once() @patch.object(StageFileManager, "_upload_with_retry") @patch.object(StageFileManager, "_refresh_stage_and_client") def test_put_object_refresh_on_expiry( self, mock_refresh: Mock, 
mock_upload: Mock, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any], ) -> None: """Test that credentials are refreshed when expired.""" # Set expired credentials expired_info = mock_stage_info.copy() expired_info["credentials"]["expireTime"] = "2020-01-01T00:00:00Z" stage_file_manager.stage_info = expired_info stage_file_manager._put_object("test.txt", "remote/test.txt", "data/") mock_refresh.assert_called_once_with("data/") mock_upload.assert_called_once() @patch("pymilvus.bulk_writer.stage_file_manager.Minio") def test_upload_with_retry_success( self, mock_minio: Mock, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any] ) -> None: """Test successful upload with retry.""" stage_file_manager.stage_info = mock_stage_info stage_file_manager._client = mock_minio.return_value stage_file_manager._upload_with_retry("test.txt", "remote/test.txt", "data/") stage_file_manager._client.fput_object.assert_called_once_with( bucket_name="test-bucket", object_name="remote/test.txt", file_path="test.txt", ) @patch("pymilvus.bulk_writer.stage_file_manager.Minio") @patch.object(StageFileManager, "_refresh_stage_and_client") def test_upload_with_retry_failure( self, mock_refresh: Mock, mock_minio: Mock, stage_file_manager: StageFileManager, mock_stage_info: Dict[str, Any], ) -> None: """Test upload failure after max retries.""" stage_file_manager.stage_info = mock_stage_info mock_client = mock_minio.return_value mock_client.fput_object.side_effect = Exception("Upload failed") stage_file_manager._client = mock_client with pytest.raises(RuntimeError, match="Upload failed after 2 attempts"): stage_file_manager._upload_with_retry("test.txt", "remote/test.txt", "data/", max_retries=2) assert mock_client.fput_object.call_count == 2 assert mock_refresh.call_count == 2 # Refreshed on each retry
TestStageFileManager
python
facebook__pyre-check
client/configuration/search_path.py
{ "start": 3338, "end": 4155 }
class ____(RawElement):
    root: str
    subdirectory: str

    def expand_global_root(self, global_root: str) -> "SubdirectoryRawElement":
        return SubdirectoryRawElement(
            root=filesystem.expand_global_root(self.root, global_root=global_root),
            subdirectory=self.subdirectory,
        )

    def expand_relative_root(self, relative_root: str) -> "SubdirectoryRawElement":
        return SubdirectoryRawElement(
            root=_expand_relative_root(self.root, relative_root=relative_root),
            subdirectory=self.subdirectory,
        )

    def expand_glob(self) -> List[RawElement]:
        return [self]

    def to_element(self) -> SubdirectoryElement:
        return SubdirectoryElement(self.root, self.subdirectory)


@dataclasses.dataclass(frozen=True)
SubdirectoryRawElement
python
pytorch__pytorch
torch/distributions/categorical.py
{ "start": 317, "end": 6221 }
class ____(Distribution): r""" Creates a categorical distribution parameterized by either :attr:`probs` or :attr:`logits` (but not both). .. note:: It is equivalent to the distribution that :func:`torch.multinomial` samples from. Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``. If `probs` is 1-dimensional with length-`K`, each element is the relative probability of sampling the class at that index. If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of relative probability vectors. .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum, and it will be normalized to sum to 1 along the last dimension. :attr:`probs` will return this normalized value. The `logits` argument will be interpreted as unnormalized log probabilities and can therefore be any real number. It will likewise be normalized so that the resulting probabilities sum to 1 along the last dimension. :attr:`logits` will return this normalized value. See also: :func:`torch.multinomial` Example:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ])) >>> m.sample() # equal probability of 0, 1, 2, 3 tensor(3) Args: probs (Tensor): event probabilities logits (Tensor): event log probabilities (unnormalized) """ # pyrefly: ignore [bad-override] arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} has_enumerate_support = True def __init__( self, probs: Optional[Tensor] = None, logits: Optional[Tensor] = None, validate_args: Optional[bool] = None, ) -> None: if (probs is None) == (logits is None): raise ValueError( "Either `probs` or `logits` must be specified, but not both." ) if probs is not None: if probs.dim() < 1: raise ValueError("`probs` parameter must be at least one-dimensional.") # pyrefly: ignore [read-only] self.probs = probs / probs.sum(-1, keepdim=True) else: assert logits is not None # helps mypy if logits.dim() < 1: raise ValueError("`logits` parameter must be at least one-dimensional.") # Normalize # pyrefly: ignore [read-only] self.logits = logits - logits.logsumexp(dim=-1, keepdim=True) self._param = self.probs if probs is not None else self.logits self._num_events = self._param.size()[-1] batch_shape = ( self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size() ) super().__init__(batch_shape, validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(Categorical, _instance) batch_shape = torch.Size(batch_shape) param_shape = batch_shape + torch.Size((self._num_events,)) if "probs" in self.__dict__: new.probs = self.probs.expand(param_shape) new._param = new.probs if "logits" in self.__dict__: new.logits = self.logits.expand(param_shape) new._param = new.logits new._num_events = self._num_events super(Categorical, new).__init__(batch_shape, validate_args=False) new._validate_args = self._validate_args return new def _new(self, *args, **kwargs): return self._param.new(*args, **kwargs) @constraints.dependent_property(is_discrete=True, event_dim=0) # pyrefly: ignore [bad-override] def support(self): return constraints.integer_interval(0, self._num_events - 1) @lazy_property def logits(self) -> Tensor: return probs_to_logits(self.probs) @lazy_property def probs(self) -> Tensor: return logits_to_probs(self.logits) @property def param_shape(self) -> torch.Size: return self._param.size() @property def mean(self) -> Tensor: return torch.full( self._extended_shape(), nan, 
dtype=self.probs.dtype, device=self.probs.device, ) @property def mode(self) -> Tensor: return self.probs.argmax(dim=-1) @property def variance(self) -> Tensor: return torch.full( self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device, ) def sample(self, sample_shape=torch.Size()): if not isinstance(sample_shape, torch.Size): sample_shape = torch.Size(sample_shape) probs_2d = self.probs.reshape(-1, self._num_events) samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T return samples_2d.reshape(self._extended_shape(sample_shape)) def log_prob(self, value): if self._validate_args: self._validate_sample(value) value = value.long().unsqueeze(-1) value, log_pmf = torch.broadcast_tensors(value, self.logits) value = value[..., :1] return log_pmf.gather(-1, value).squeeze(-1) def entropy(self): min_real = torch.finfo(self.logits.dtype).min logits = torch.clamp(self.logits, min=min_real) p_log_p = logits * self.probs return -p_log_p.sum(-1) def enumerate_support(self, expand=True): num_events = self._num_events values = torch.arange(num_events, dtype=torch.long, device=self._param.device) values = values.view((-1,) + (1,) * len(self._batch_shape)) if expand: values = values.expand((-1,) + self._batch_shape) return values
Categorical
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/translate.py
{ "start": 3754, "end": 4107 }
class ____(BaseGoogleLink):
    """
    Helper class for constructing Translation Legacy Model Predict link.

    Legacy Models are created and managed by AutoML API.
    """

    name = "Translation Legacy Model Predict"
    key = "translation_legacy_model_predict"
    format_str = TRANSLATION_LEGACY_MODEL_PREDICT_LINK
TranslationLegacyModelPredictLink
python
numba__numba
numba/core/typing/templates.py
{ "start": 16986, "end": 17197 }
class ____(InternalError):
    def __init__(self, reason):
        super(_EmptyImplementationEntry, self).__init__(
            "_EmptyImplementationEntry({!r})".format(reason),
        )
_EmptyImplementationEntry
python
doocs__leetcode
solution/0900-0999/0940.Distinct Subsequences II/Solution2.py
{ "start": 0, "end": 230 }
class ____:
    def distinctSubseqII(self, s: str) -> int:
        mod = 10**9 + 7
        dp = [0] * 26
        for c in s:
            i = ord(c) - ord('a')
            dp[i] = sum(dp) % mod + 1
        return sum(dp) % mod
Solution
python
facebook__pyre-check
client/commands/tests/infer_test.py
{ "start": 14488, "end": 26199 }
class ____(testslide.TestCase): maxDiff = 2000 def test_module_annotations_from_infer_output(self) -> None: def assert_result( path: str, infer_output: infer.RawInferOutputForPath, options: infer.StubGenerationOptions, expected: infer.ModuleAnnotations, ) -> None: self.assertEqual( infer.ModuleAnnotations.from_infer_output(path, infer_output, options), expected, ) default_path = "test.py" default_qualifier = "test" default_options = infer.StubGenerationOptions() assert_result( path=default_path, infer_output=infer.RawInferOutputForPath(qualifier=default_qualifier), options=default_options, expected=infer.ModuleAnnotations( qualifier=default_qualifier, path=default_path, options=default_options ), ) assert_result( path=default_path, infer_output=infer.RawInferOutputForPath( qualifier=default_qualifier, define_annotations=[ infer.RawDefineAnnotation( name="test.Foo.foo", parent="test.Foo", location=infer.RawAnnotationLocation( qualifier="test", path="test.py", line=1 ), return_="int", is_async=True, ), infer.RawDefineAnnotation( name="test.bar", location=infer.RawAnnotationLocation( qualifier="test", path="test.py", line=2 ), ), ], ), options=default_options, expected=infer.ModuleAnnotations( qualifier=default_qualifier, path=default_path, options=default_options, functions=[ infer.FunctionAnnotation( name="test.bar", return_annotation=infer.TypeAnnotation.from_raw( None, qualifier=default_qualifier, options=default_options, ), parameters=[], is_async=False, ) ], methods=[ infer.MethodAnnotation( parent="test.Foo", name="test.Foo.foo", return_annotation=infer.TypeAnnotation.from_raw( "int", qualifier=default_qualifier, options=default_options, ), parameters=[], is_async=True, ) ], ), ) assert_result( path=default_path, infer_output=infer.RawInferOutputForPath( qualifier=default_qualifier, global_annotations=[ infer.RawGlobalAnnotation( name="x", location=infer.RawAnnotationLocation( qualifier="test", path="test.py", line=3 ), annotation="int", ) ], ), options=default_options, expected=infer.ModuleAnnotations( qualifier=default_qualifier, path=default_path, options=default_options, globals_=[ infer.GlobalAnnotation( name="x", annotation=infer.TypeAnnotation.from_raw( "int", qualifier=default_qualifier, options=default_options, ), ) ], ), ) def test_module_annotations_from_infer_output__attributes(self) -> None: def assert_result( path: str, infer_output: infer.RawInferOutputForPath, options: infer.StubGenerationOptions, expected: infer.ModuleAnnotations, ) -> None: self.assertEqual( infer.ModuleAnnotations.from_infer_output(path, infer_output, options), expected, ) default_path = "test.py" default_qualifier = "test" default_options = infer.StubGenerationOptions() assert_result( path=default_path, infer_output=infer.RawInferOutputForPath( qualifier=default_qualifier, attribute_annotations=[ infer.RawAttributeAnnotation( parent="foo.bar.test.Foo", name="x", location=infer.RawAnnotationLocation( qualifier="foo.bar.test", path="foo/bar/test.py", line=3 ), annotation="int", ) ], ), options=default_options, expected=infer.ModuleAnnotations( qualifier=default_qualifier, path=default_path, options=default_options, ), ) annotate_attribute_options = infer.StubGenerationOptions( annotate_attributes=True, ) assert_result( path=default_path, infer_output=infer.RawInferOutputForPath( qualifier=default_qualifier, attribute_annotations=[ infer.RawAttributeAnnotation( parent="foo.bar.test.Foo", name="x", location=infer.RawAnnotationLocation( qualifier="foo.bar.test", path="foo/bar/test.py", line=3 ), 
annotation="int", ) ], ), options=annotate_attribute_options, expected=infer.ModuleAnnotations( qualifier=default_qualifier, path=default_path, options=annotate_attribute_options, attributes=[ infer.AttributeAnnotation( parent="foo.bar.test.Foo", name="x", annotation=infer.TypeAnnotation.from_raw( "int", qualifier=default_qualifier, options=annotate_attribute_options, ), ) ], ), ) def test_create_module_annotations(self) -> None: def assert_created( infer_output: infer.RawInferOutput, base_path: Path, expected: List[ExpectedModuleAnnotationItem], ) -> None: default_options = infer.StubGenerationOptions() self.assertCountEqual( infer.create_module_annotations( infer_output, base_path, default_options ), [ infer.ModuleAnnotations.from_infer_output( path=item.path, infer_output=item.infer_output, options=default_options, ) for item in expected ], ) foo_global0 = infer.RawGlobalAnnotation( name="x", location=infer.RawAnnotationLocation( qualifier="p0.foo", path="/root/p0/foo.py", line=1 ), annotation="int", ) foo_global1 = infer.RawGlobalAnnotation( name="y", location=infer.RawAnnotationLocation( qualifier="p0.foo", path="/root/p0/foo.py", line=2 ), annotation="str", ) bar_global0 = infer.RawGlobalAnnotation( name="x", location=infer.RawAnnotationLocation( qualifier="p1.bar", path="/root/p1/bar.py", line=1 ), annotation="int", ) bar_attribute0 = infer.RawAttributeAnnotation( parent="bar.Foo", name="a", location=infer.RawAnnotationLocation( qualifier="p1.bar", path="/root/p1/bar.py", line=2 ), annotation="bool", ) # Empty case assert_created( infer_output=infer.RawInferOutput(), base_path=Path("irrelevant"), expected=[], ) # Test proper splits by paths assert_created( infer_output=infer.RawInferOutput( global_annotations=[ foo_global0, bar_global0, foo_global1, ], attribute_annotations=[ bar_attribute0, ], ), base_path=Path("/root"), expected=[ ExpectedModuleAnnotationItem( path="p0/foo.py", infer_output=infer.RawInferOutputForPath( qualifier="p0.foo", global_annotations=[foo_global0, foo_global1], ), ), ExpectedModuleAnnotationItem( path="p1/bar.py", infer_output=infer.RawInferOutputForPath( qualifier="p1.bar", global_annotations=[bar_global0], attribute_annotations=[bar_attribute0], ), ), ], ) # Test relativization & path filtering # # Note that the qualifier doesn't inherently correspond to the # relative path - a local configuration can be nested inside a # project, in which case the qualifier is still relative to the # project root. assert_created( infer_output=infer.RawInferOutput( global_annotations=[ foo_global0, bar_global0, foo_global1, ], ), base_path=Path("/root/p1"), expected=[ ExpectedModuleAnnotationItem( path="bar.py", infer_output=infer.RawInferOutputForPath( qualifier="p1.bar", global_annotations=[bar_global0], ), ) ], ) def test_module_annotation_stubs_path(self) -> None: self.assertEqual( infer.ModuleAnnotations( qualifier="derp", path="derp.py", options=infer.StubGenerationOptions(), ).stubs_path(Path("/root")), Path("/root/derp.pyi"), ) def _assert_stubs_equal(actual: str, expected: str) -> None: actual = actual.strip() expected = textwrap.dedent(expected.rstrip()) if actual != expected: print(f"---\nactual\n---\n{actual}") print(f"---\nexpected\n---\n{expected}") raise AssertionError("Stubs not as expected, see stdout")
ModuleAnnotationTest
python
pytorch__pytorch
test/test_datapipe.py
{ "start": 21650, "end": 22537 }
class ____(IterDataPipe): def __init__(self, input_dp): super().__init__() self.input_dp = input_dp # Prevent in-place modification def __iter__(self): input_dp = ( self.input_dp if isinstance(self.input_dp, IterDataPipe) else copy.deepcopy(self.input_dp) ) yield from input_dp def _fake_fn(data): return data def _fake_add(constant, data): return constant + data def _fake_filter_fn(data): return True def _simple_filter_fn(data): return data >= 5 def _fake_filter_fn_constant(constant, data): return data >= constant def _mul_10(x): return x * 10 def _mod_3_test(x): return x % 3 == 1 def _to_list(x): return [x] lambda_fn1 = lambda x: x # noqa: E731 lambda_fn2 = lambda x: x % 2 # noqa: E731 lambda_fn3 = lambda x: x >= 5 # noqa: E731
IDP_NoLen
python
pytorch__pytorch
test/nn/attention/test_fa4.py
{ "start": 4095, "end": 10014 }
class ____(TestCase): @classmethod def setUpClass(cls): super().setUpClass() if not _fa4_dependencies_available(): return # This might pollute tests.. TODO activate_flash_attention_impl("FA4") @unittest.skipUnless(_fa4_dependencies_available(), "FA4 backend unavailable") def _assert_flash_matches_math( self, device, shape: SdpaShape, dtype: torch.dtype, is_causal: bool, rtol: int = 2, test_backward: bool = True, ) -> None: q = torch.randn(shape, dtype=dtype, device=device).requires_grad_(True) k = torch.randn(shape, dtype=dtype, device=device).requires_grad_(True) v = torch.randn(shape, dtype=dtype, device=device).requires_grad_(True) # Forward pass comparison out_flash, out_math_low, out_math_fp32 = flash_vs_math( self, q, k, v, is_causal=is_causal, rtol=rtol ) if test_backward: # Backward pass comparison g = torch.randn_like(out_flash) # Flash gradients dq_flash, dk_flash, dv_flash = torch.autograd.grad( out_flash, (q, k, v), g, retain_graph=True ) # Math fp32 gradients (reference) dq_math_fp32, dk_math_fp32, dv_math_fp32 = torch.autograd.grad( out_math_fp32, (q, k, v), g, retain_graph=True ) # Math low precision gradients dq_math_low, dk_math_low, dv_math_low = torch.autograd.grad( out_math_low, (q, k, v), g ) # Calculate gradient tolerances (similar to flash-attention tests) dq_atol = 2 * (dq_math_fp32 + 0.3 - 0.3 - dq_math_fp32).abs().max().item() dk_atol = 2 * (dk_math_fp32 + 0.3 - 0.3 - dk_math_fp32).abs().max().item() dv_atol = 2 * (dv_math_fp32 + 0.3 - 0.3 - dv_math_fp32).abs().max().item() # Check flash gradients are within tolerance of math low precision dq_math_low_error = (dq_math_low - dq_math_fp32).abs().max().item() dq_flash_error = (dq_flash - dq_math_fp32).abs().max().item() self.assertLessEqual( dq_flash_error, rtol * dq_math_low_error + dq_atol, f"dQ: Flash error {dq_flash_error:.2e} exceeds {rtol}x Math-low error {dq_math_low_error:.2e} + {dq_atol:.2e}", ) dk_math_low_error = (dk_math_low - dk_math_fp32).abs().max().item() dk_flash_error = (dk_flash - dk_math_fp32).abs().max().item() self.assertLessEqual( dk_flash_error, rtol * dk_math_low_error + dk_atol, f"dK: Flash error {dk_flash_error:.2e} exceeds {rtol}x Math-low error {dk_math_low_error:.2e} + {dk_atol:.2e}", ) dv_math_low_error = (dv_math_low - dv_math_fp32).abs().max().item() dv_flash_error = (dv_flash - dv_math_fp32).abs().max().item() self.assertLessEqual( dv_flash_error, rtol * (dv_math_low_error + dv_atol), f"dV: Flash error {dv_flash_error:.2e} exceeds {rtol}x (Math-low error {dv_math_low_error:.2e} + {dv_atol:.2e})", ) @unittest.skipUnless(_fa4_dependencies_available(), "FA4 backend unavailable") @parametrize("dtype", [torch.float16, torch.bfloat16]) @parametrize("batch", [1, 2]) @parametrize( "seq_len", [ 512, 1024, ], ) @parametrize("heads", [4, 8]) @parametrize("head_dim", [64, 128]) @parametrize( "is_causal", [False, True], ) def test_flash_attention_matches_math( self, device, dtype, batch, seq_len, heads, head_dim, is_causal ): # TODO: Getting bad TMA setup on dO w/ headdim = 64, will take a look test_backward = head_dim == 128 and dtype == torch.float16 shape = SdpaShape(batch, heads, seq_len, head_dim) self._assert_flash_matches_math( device, shape=shape, dtype=dtype, is_causal=is_causal, # Bwd is consistently erroring test_backward=test_backward, ) @unittest.skipUnless(_fa4_dependencies_available(), "FA4 backend unavailable") @parametrize("dtype", [torch.float16, torch.bfloat16]) def test_fa4_kernel_called(self, device, dtype): shape = SdpaShape(2, 4, 512, 128) q = torch.randn(shape, 
dtype=dtype, device=device, requires_grad=True) k = torch.randn(shape, dtype=dtype, device=device, requires_grad=True) v = torch.randn(shape, dtype=dtype, device=device, requires_grad=True) with cuda_kernel_profiler("flash_attncute") as prof_result: with sdpa_kernel(SDPBackend.FLASH_ATTENTION): out = F.scaled_dot_product_attention( q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False ) out.sum().backward() self.assertTrue( prof_result["found"], f"FA4 CUTE kernel not found in forward/backward. Available kernels: {prof_result['kernel_names']}", ) q.grad = None k.grad = None v.grad = None with cuda_kernel_profiler("flash_attncute") as prof_result: with sdpa_kernel(SDPBackend.MATH): out = F.scaled_dot_product_attention( q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False ) out.sum().backward() self.assertFalse( prof_result["found"], f"FA4 CUTE kernel unexpectedly found with MATH backend. Kernels: {prof_result['kernel_names']}", ) instantiate_device_type_tests(TestFlashAttentionFA4, globals(), only_for="cuda") if __name__ == "__main__": run_tests()
TestFlashAttentionFA4
python
keras-team__keras
keras/src/trainers/data_adapters/generator_data_adapter_test.py
{ "start": 874, "end": 8528 }
class ____(testing.TestCase): @parameterized.named_parameters( named_product( [ {"testcase_name": "use_weight", "use_sample_weight": True}, {"testcase_name": "no_weight", "use_sample_weight": False}, ], generator_type=["np", "tf", "jax", "torch"], ) ) def test_basic_flow(self, use_sample_weight, generator_type): x = np.random.random((34, 4)).astype("float32") y = np.array([[i, i] for i in range(34)], dtype="float32") sw = np.random.random((34,)).astype("float32") if generator_type == "tf": x, y, sw = tf.constant(x), tf.constant(y), tf.constant(sw) elif generator_type == "jax": x, y, sw = jnp.array(x), jnp.array(y), jnp.array(sw) elif generator_type == "torch": x, y, sw = ( torch.as_tensor(x), torch.as_tensor(y), torch.as_tensor(sw), ) if not use_sample_weight: sw = None make_generator = example_generator( x, y, sample_weight=sw, batch_size=16, ) adapter = generator_data_adapter.GeneratorDataAdapter(make_generator()) if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = ( jax.Array if generator_type == "jax" else np.ndarray ) elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor sample_order = [] for i, batch in enumerate(it): if use_sample_weight: self.assertEqual(len(batch), 3) bx, by, bsw = batch else: self.assertEqual(len(batch), 2) bx, by = batch self.assertIsInstance(bx, expected_class) self.assertIsInstance(by, expected_class) self.assertEqual(bx.dtype, by.dtype) self.assertContainsExactSubsequence(str(bx.dtype), "float32") if i < 2: self.assertEqual(bx.shape, (16, 4)) self.assertEqual(by.shape, (16, 2)) else: self.assertEqual(bx.shape, (2, 4)) self.assertEqual(by.shape, (2, 2)) if use_sample_weight: self.assertIsInstance(bsw, expected_class) for j in range(by.shape[0]): sample_order.append(by[j, 0]) self.assertAllClose(sample_order, list(range(34))) def test_with_different_shapes(self): def generator(): yield np.ones([16, 4], "float32"), np.ones([16, 2], "float32") yield np.ones([16, 5], "float32"), np.ones([16, 2], "float32") yield np.ones([2, 6], "float32"), np.ones([2, 2], "float32") adapter = generator_data_adapter.GeneratorDataAdapter(generator()) if backend.backend() == "numpy": it = adapter.get_numpy_iterator() elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() elif backend.backend() == "jax": it = adapter.get_jax_iterator() elif backend.backend() == "torch": it = adapter.get_torch_dataloader() for i, batch in enumerate(it): self.assertEqual(len(batch), 2) bx, by = batch self.assertEqual(bx.dtype, by.dtype) self.assertContainsExactSubsequence(str(bx.dtype), "float32") if i == 0: self.assertEqual(bx.shape, (16, 4)) self.assertEqual(by.shape, (16, 2)) elif i == 1: self.assertEqual(bx.shape, (16, 5)) self.assertEqual(by.shape, (16, 2)) else: self.assertEqual(bx.shape, (2, 6)) self.assertEqual(by.shape, (2, 2)) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="tf.data.Dataset specific behavior", ) def test_with_unexpected_shapes(self): def generator(): yield np.ones([16, 4], "float32"), np.ones([16, 2], "float32") yield np.ones([16, 5], "float32"), np.ones([16, 2], "float32") yield np.ones([16, 6], "float32"), np.ones([16, 3], "float32") adapter = generator_data_adapter.GeneratorDataAdapter(generator()) it = iter(adapter.get_tf_dataset()) next(it) next(it) # note that Tensorflow wraps 
the TypeError in an InvalidArgumentError. with self.assertRaisesRegex( tf.errors.InvalidArgumentError, "TypeError:.* shape \\(16, 3\\).* shape \\(None, 2\\) was expected" ".*first two batches", ): next(it) @parameterized.named_parameters( named_product(generator_type=["tf", "jax", "scipy"]) ) @pytest.mark.skipif( not backend.SUPPORTS_SPARSE_TENSORS, reason="Backend does not support sparse tensors", ) def test_sparse_tensors(self, generator_type): if generator_type == "tf": x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 4)) y = tf.SparseTensor([[0, 0], [1, 1]], [3.0, 4.0], (2, 2)) elif generator_type == "jax": x = jax_sparse.BCOO(([1.0, 2.0], [[0, 0], [1, 2]]), shape=(2, 4)) y = jax_sparse.BCOO(([3.0, 4.0], [[0, 0], [1, 1]]), shape=(2, 2)) elif generator_type == "scipy": x = scipy.sparse.coo_matrix(([1.0, 2.0], ([0, 1], [0, 2])), (2, 4)) y = scipy.sparse.coo_matrix(([3.0, 4.0], ([0, 1], [0, 1])), (2, 2)) def generate(): for _ in range(4): yield x, y adapter = generator_data_adapter.GeneratorDataAdapter(generate()) if backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.SparseTensor elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax_sparse.BCOO for batch in it: self.assertEqual(len(batch), 2) bx, by = batch self.assertIsInstance(bx, expected_class) self.assertIsInstance(by, expected_class) self.assertEqual(bx.shape, (2, 4)) self.assertEqual(by.shape, (2, 2)) @pytest.mark.skipif( not backend.SUPPORTS_RAGGED_TENSORS, reason="Backend does not support ragged tensors", ) def test_ragged_tensors(self): x = tf.ragged.constant( [[[0.0, 1.0]], [[2.0, 3.0], [4.0, 5.0]]], ragged_rank=1 ) y = tf.ragged.constant( [[[0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], ragged_rank=1 ) def generate(): for _ in range(4): yield x, y adapter = generator_data_adapter.GeneratorDataAdapter(generate()) if backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.RaggedTensor for batch in it: self.assertEqual(len(batch), 2) bx, by = batch self.assertIsInstance(bx, expected_class) self.assertIsInstance(by, expected_class) self.assertEqual(bx.shape, (2, None, 2)) self.assertEqual(by.shape, (2, None, 2))
GeneratorDataAdapterTest
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/sparse_ops/sparse_tensor_dense_matmul_op_d9m_test.py
{ "start": 2528, "end": 3860 }
class ____(test.TestCase): """Test d9m-unimplemented exceptions from SparseTensorDenseMatmulOp. Test that tf.errors.UnimplementedError is thrown, as appropriate, by the GPU-specific code-paths through SparseTensorDenseMatmulOp when deterministic ops are enabled. This test assumes that sparse_tensor_dense_matmul_op_test.py runs equivalent test cases when deterministic ops are not enabled and will therefore detect erroneous exception throwing in those cases. """ @test_util.run_gpu_only @test_util.run_in_graph_and_eager_modes def testExceptionThrowing(self): with self.session(), test_util.force_gpu(): for data_type in [ np.float16, np.float32, np.float64, np.complex64, np.complex128 ]: sparse_input, dense_input = _gen_data( m=5, k=10, n=7, nnz=20, row_occupied_rate=0.9, data_type=data_type, seed=456) with self.assertRaisesRegex( errors.UnimplementedError, "A deterministic GPU implementation of SparseTensorDenseMatmulOp" + " is not currently available."): result = sparse_ops.sparse_tensor_dense_matmul( sparse_input, dense_input) self.evaluate(result)
SparseTensorDenseMatmulOpDeterminismExceptionsTest
python
spyder-ide__spyder
spyder/utils/syntaxhighlighters.py
{ "start": 67666, "end": 68689 }
class ____(GenericSH): """gettext Syntax Highlighter""" # Syntax highlighting rules: PROG = re.compile(make_gettext_patterns(), re.S) #============================================================================== # yaml highlighter #============================================================================== def make_yaml_patterns(): "Strongly inspired from sublime highlighter " kw = any("keyword", [r":|>|-|\||\[|\]|[A-Za-z][\w\s\-\_ ]+(?=:)"]) links = any("normal", [r"#:[^\n]*"]) comment = any("comment", [r"#[^\n]*"]) number = any("number", [r"\b[+-]?[0-9]+[lL]?\b", r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b", r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b"]) sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?' string = any("string", [sqstring, dqstring]) return "|".join([kw, string, number, links, comment, any("SYNC", [r"\n"])])
GetTextSH
python
mamba-org__mamba
micromamba/tests/test_config.py
{ "start": 3153, "end": 6558 }
class ____: @pytest.mark.parametrize( "rc_file", (("home", "dummy.yaml"), ("home", ".mambarc")), indirect=True ) @pytest.mark.parametrize("rc_file_args", ({"override_channels_enabled": True},), indirect=True) @pytest.mark.parametrize("quiet_flag", ["-q", "--quiet"]) @pytest.mark.parametrize("norc", [False, True]) def test_config_sources(self, rc_file, quiet_flag, norc): if norc: with pytest.raises(subprocess.CalledProcessError): config("sources", quiet_flag, "--rc-file", rc_file, "--no-rc") else: res = config("sources", quiet_flag, "--rc-file", rc_file) rc_file_short = str(rc_file).replace(os.path.expanduser("~"), "~") assert res.strip().splitlines() == ( f"Configuration files (by precedence order):\n{rc_file_short}".splitlines() ) @pytest.mark.parametrize("quiet_flag", ["-q", "--quiet"]) @pytest.mark.parametrize("norc", [False, True]) def test_config_sources_empty(self, tmp_prefix, quiet_flag, norc): if norc: res = config("sources", quiet_flag, "--no-rc") assert res.strip() == "Configuration files disabled by --no-rc flag" else: res = config("sources", quiet_flag) assert res.startswith("Configuration files (by precedence order):") # TODO: test system located sources? @pytest.mark.parametrize( "rc_file", ( # "/etc/conda/.condarc", # "/etc/conda/condarc", # "/etc/conda/condarc.d/", # "/etc/conda/.mambarc", # "/var/lib/conda/.condarc", # "/var/lib/conda/condarc", # "/var/lib/conda/condarc.d/", # "/var/lib/conda/.mambarc", ("user_config_dir", "mambarc"), ("env_set_xdg", "mambarc"), ("home", ".conda/.condarc"), ("home", ".conda/condarc"), ("home", ".conda/condarc.d"), ("home", ".condarc"), ("home", ".mambarc"), ("root_prefix", ".condarc"), ("root_prefix", "condarc"), ("root_prefix", "condarc.d"), ("root_prefix", ".mambarc"), ("prefix", ".condarc"), ("prefix", "condarc"), ("prefix", "condarc.d"), ("prefix", ".mambarc"), ), indirect=True, ) @pytest.mark.parametrize("rc_file_args", ({"override_channels_enabled": True},), indirect=True) def test_config_rc_file(self, rc_file, tmp_env_name): srcs = config("sources", "-n", tmp_env_name).strip().splitlines() short_name = str(rc_file).replace(os.path.expanduser("~"), "~") expected_srcs = f"Configuration files (by precedence order):\n{short_name}".splitlines() assert srcs == expected_srcs @pytest.mark.parametrize( "rc_file", [("home", "somefile.yml")], indirect=True, ) @pytest.mark.parametrize("rc_file_args", ({"override_channels_enabled": True},), indirect=True) def test_config_expand_user(self, rc_file): rc_file_short = str(rc_file).replace(os.path.expanduser("~"), "~") res = config("sources", "--rc-file", rc_file) assert ( res.strip().splitlines() == f"Configuration files (by precedence order):\n{rc_file_short}".splitlines() )
TestConfigSources
python
ray-project__ray
python/ray/autoscaler/_private/gcp/node.py
{ "start": 5176, "end": 5888 }
class ____(GCPNode): """Abstraction around compute nodes""" # https://cloud.google.com/compute/docs/instances/instance-life-cycle NON_TERMINATED_STATUSES = {"PROVISIONING", "STAGING", "RUNNING"} TERMINATED_STATUSES = {"TERMINATED", "SUSPENDED"} RUNNING_STATUSES = {"RUNNING"} STATUS_FIELD = "status" def get_labels(self) -> dict: return self.get("labels", {}) def get_external_ip(self) -> str: return ( self.get("networkInterfaces", [{}])[0] .get("accessConfigs", [{}])[0] .get("natIP", None) ) def get_internal_ip(self) -> str: return self.get("networkInterfaces", [{}])[0].get("networkIP")
GCPComputeNode
python
matplotlib__matplotlib
galleries/examples/widgets/menu.py
{ "start": 400, "end": 2639 }
class ____(artist.Artist): padx = 0.05 # inches pady = 0.05 def __init__(self, fig, labelstr, props=None, hoverprops=None, on_select=None): super().__init__() self.set_figure(fig) self.labelstr = labelstr self.props = props if props is not None else ItemProperties() self.hoverprops = ( hoverprops if hoverprops is not None else ItemProperties()) if self.props.fontsize != self.hoverprops.fontsize: raise NotImplementedError( 'support for different font sizes not implemented') self.on_select = on_select # specify coordinates in inches. self.label = fig.text(0, 0, labelstr, transform=fig.dpi_scale_trans, size=props.fontsize) self.text_bbox = self.label.get_window_extent( fig.canvas.get_renderer()) self.text_bbox = fig.dpi_scale_trans.inverted().transform_bbox(self.text_bbox) self.rect = patches.Rectangle( (0, 0), 1, 1, transform=fig.dpi_scale_trans ) # Will be updated later. self.set_hover_props(False) fig.canvas.mpl_connect('button_release_event', self.check_select) def check_select(self, event): over, _ = self.rect.contains(event) if not over: return if self.on_select is not None: self.on_select(self) def set_extent(self, x, y, w, h, depth): self.rect.set(x=x, y=y, width=w, height=h) self.label.set(position=(x + self.padx, y + depth + self.pady / 2)) self.hover = False def draw(self, renderer): self.rect.draw(renderer) self.label.draw(renderer) def set_hover_props(self, b): props = self.hoverprops if b else self.props self.label.set(color=props.labelcolor) self.rect.set(facecolor=props.bgcolor, alpha=props.alpha) def set_hover(self, event): """ Update the hover status of event and return whether it was changed. """ b, _ = self.rect.contains(event) changed = (b != self.hover) if changed: self.set_hover_props(b) self.hover = b return changed
MenuItem
python
doocs__leetcode
solution/2900-2999/2908.Minimum Sum of Mountain Triplets I/Solution.py
{ "start": 0, "end": 446 }
class ____: def minimumSum(self, nums: List[int]) -> int: n = len(nums) right = [inf] * (n + 1) for i in range(n - 1, -1, -1): right[i] = min(right[i + 1], nums[i]) ans = left = inf for i, x in enumerate(nums): if left < x and right[i + 1] < x: ans = min(ans, left + x + right[i + 1]) left = min(left, x) return -1 if ans == inf else ans
Solution
python
langchain-ai__langchain
libs/langchain/langchain_classic/chains/conversational_retrieval/base.py
{ "start": 2439, "end": 2695 }
class ____(BaseModel): """Input type for ConversationalRetrievalChain.""" question: str """The question to answer.""" chat_history: list[CHAT_TURN_TYPE] = Field(default_factory=list) """The chat history to use for retrieval."""
InputType
python
scipy__scipy
scipy/special/tests/test_basic.py
{ "start": 62438, "end": 68800 }
class ____: def test_ellipj_nan(self): """Regression test for #912.""" special.ellipj(0.5, np.nan) def test_ellipj(self): el = special.ellipj(0.2,0) rel = [sin(0.2),cos(0.2),1.0,0.20] assert_allclose(el, rel, atol=1.5e-13, rtol=0) def test_ellipk(self): elk = special.ellipk(.2) assert_allclose(elk, 1.659623598610528, atol=1.5e-11, rtol=0) assert_equal(special.ellipkm1(0.0), np.inf) assert_equal(special.ellipkm1(1.0), pi/2) assert_equal(special.ellipkm1(np.inf), 0.0) assert_equal(special.ellipkm1(np.nan), np.nan) assert_equal(special.ellipkm1(-1), np.nan) assert_allclose(special.ellipk(-10), 0.7908718902387385) def test_ellipkinc(self): elkinc = special.ellipkinc(pi/2,.2) elk = special.ellipk(0.2) assert_allclose(elkinc, elk, atol=1.5e-15, rtol=0) alpha = 20*pi/180 phi = 45*pi/180 m = sin(alpha)**2 elkinc = special.ellipkinc(phi,m) assert_allclose(elkinc, 0.79398143, atol=1.5e-8, rtol=0) # From pg. 614 of A & S assert_equal(special.ellipkinc(pi/2, 0.0), pi/2) assert_equal(special.ellipkinc(pi/2, 1.0), np.inf) assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0) assert_equal(special.ellipkinc(pi/2, np.nan), np.nan) assert_equal(special.ellipkinc(pi/2, 2), np.nan) assert_equal(special.ellipkinc(0, 0.5), 0.0) assert_equal(special.ellipkinc(np.inf, 0.5), np.inf) assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf) assert_equal(special.ellipkinc(np.inf, np.inf), np.nan) assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan) assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan) assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan) assert_equal(special.ellipkinc(np.nan, 0.5), np.nan) assert_equal(special.ellipkinc(np.nan, np.nan), np.nan) assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14) assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946) def test_ellipkinc_2(self): # Regression test for gh-3550 # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value mbad = 0.68359375000000011 phi = 0.9272952180016123 m = np.nextafter(mbad, 0) mvals = [] for j in range(10): mvals.append(m) m = np.nextafter(m, 1) f = special.ellipkinc(phi, mvals) assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1) # this bug also appears at phi + n * pi for at least small n f1 = special.ellipkinc(phi + pi, mvals) assert_array_almost_equal_nulp(f1, np.full_like(f1, 5.1296650500976675), 2) def test_ellipkinc_singular(self): # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2) xlog = np.logspace(-300, -17, 25) xlin = np.linspace(1e-17, 0.1, 25) xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False) assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e14) assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e14) assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e14) assert_equal(special.ellipkinc(np.pi/2, 1), np.inf) assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e14) assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e14) assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e14) assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf) def test_ellipe(self): ele = special.ellipe(.2) assert_allclose(ele, 1.4890350580958529, atol=1.5e-8, rtol=0) assert_equal(special.ellipe(0.0), pi/2) assert_equal(special.ellipe(1.0), 1.0) assert_equal(special.ellipe(-np.inf), np.inf) assert_equal(special.ellipe(np.nan), np.nan) assert_equal(special.ellipe(2), np.nan) 
assert_allclose(special.ellipe(-10), 3.6391380384177689) def test_ellipeinc(self): eleinc = special.ellipeinc(pi/2,.2) ele = special.ellipe(0.2) assert_allclose(eleinc, ele, atol=1.5e-14, rtol=0) # pg 617 of A & S alpha, phi = 52*pi/180,35*pi/180 m = sin(alpha)**2 eleinc = special.ellipeinc(phi,m) assert_allclose(eleinc, 0.58823065, atol=1.5e-8, rtol=0) assert_equal(special.ellipeinc(pi/2, 0.0), pi/2) assert_equal(special.ellipeinc(pi/2, 1.0), 1.0) assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf) assert_equal(special.ellipeinc(pi/2, np.nan), np.nan) assert_equal(special.ellipeinc(pi/2, 2), np.nan) assert_equal(special.ellipeinc(0, 0.5), 0.0) assert_equal(special.ellipeinc(np.inf, 0.5), np.inf) assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf) assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf) assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf) assert_equal(special.ellipeinc(np.inf, np.inf), np.nan) assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan) assert_equal(special.ellipeinc(np.nan, 0.5), np.nan) assert_equal(special.ellipeinc(np.nan, np.nan), np.nan) assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876) def test_ellipeinc_2(self): # Regression test for gh-3550 # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value mbad = 0.68359375000000011 phi = 0.9272952180016123 m = np.nextafter(mbad, 0) mvals = [] for j in range(10): mvals.append(m) m = np.nextafter(m, 1) f = special.ellipeinc(phi, mvals) assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2) # this bug also appears at phi + n * pi for at least small n f1 = special.ellipeinc(phi + pi, mvals) assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4)
TestEllip
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1028138, "end": 1028857 }
class ____(sgqlc.types.Type): """Autogenerated return type of UpdateEnterpriseTeamDiscussionsSetting """ __schema__ = github_schema __field_names__ = ("client_mutation_id", "enterprise", "message") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise") """The enterprise with the updated team discussions setting.""" message = sgqlc.types.Field(String, graphql_name="message") """A message confirming the result of updating the team discussions setting. """
UpdateEnterpriseTeamDiscussionsSettingPayload
python
Pylons__pyramid
src/pyramid/authentication.py
{ "start": 27342, "end": 40771 }
class ____: """ A helper class for security policies that obtains data from an "auth ticket" cookie. Constructor Arguments ``secret`` The secret (a string) used for auth_tkt cookie signing. This value should be unique across all values provided to Pyramid for various subsystem secrets (see :ref:`admonishment_against_secret_sharing`). Required. ``cookie_name`` Default: ``auth_tkt``. The cookie name used (string). Optional. ``secure`` Default: ``False``. Only send the cookie back over a secure conn. Optional. ``include_ip`` Default: ``False``. Make the requesting IP address part of the authentication data in the cookie. Optional. For IPv6 this option is not recommended. The ``mod_auth_tkt`` specification does not specify how to handle IPv6 addresses, so using this option in combination with IPv6 addresses may cause an incompatible cookie. It ties the authentication ticket to that individual's IPv6 address. ``timeout`` Default: ``None``. Maximum number of seconds which a newly issued ticket will be considered valid. After this amount of time, the ticket will expire (effectively logging the user out). If this value is ``None``, the ticket never expires. Optional. ``reissue_time`` Default: ``None``. If this parameter is set, it represents the number of seconds that must pass before an authentication token cookie is automatically reissued as the result of a request which requires authentication. The duration is measured as the number of seconds since the last auth_tkt cookie was issued and 'now'. If this value is ``0``, a new ticket cookie will be reissued on every request which requires authentication. A good rule of thumb: if you want auto-expired cookies based on inactivity: set the ``timeout`` value to 1200 (20 mins) and set the ``reissue_time`` value to perhaps a tenth of the ``timeout`` value (120 or 2 mins). It's nonsensical to set the ``timeout`` value lower than the ``reissue_time`` value, as the ticket will never be reissued if so. However, such a configuration is not explicitly prevented. Optional. ``max_age`` Default: ``None``. The max age of the auth_tkt cookie, in seconds. This differs from ``timeout`` inasmuch as ``timeout`` represents the lifetime of the ticket contained in the cookie, while this value represents the lifetime of the cookie itself. When this value is set, the cookie's ``Max-Age`` and ``Expires`` settings will be set, allowing the auth_tkt cookie to last between browser sessions. It is typically nonsensical to set this to a value that is lower than ``timeout`` or ``reissue_time``, although it is not explicitly prevented. Optional. ``path`` Default: ``/``. The path for which the auth_tkt cookie is valid. May be desirable if the application only serves part of a domain. Optional. ``http_only`` Default: ``False``. Hide cookie from JavaScript by setting the HttpOnly flag. Not honored by all browsers. Optional. ``wild_domain`` Default: ``True``. An auth_tkt cookie will be generated for the wildcard domain. If your site is hosted as ``example.com`` this will make the cookie available for sites underneath ``example.com`` such as ``www.example.com``. Optional. ``parent_domain`` Default: ``False``. An auth_tkt cookie will be generated for the parent domain of the current site. For example if your site is hosted under ``www.example.com`` a cookie will be generated for ``.example.com``. This can be useful if you have multiple sites sharing the same domain. This option supercedes the ``wild_domain`` option. Optional. ``domain`` Default: ``None``. 
If provided the auth_tkt cookie will only be set for this domain. This option is not compatible with ``wild_domain`` and ``parent_domain``. Optional. ``hashalg`` Default: ``sha512`` (the literal string). Any hash algorithm supported by Python's ``hashlib.new()`` function can be used as the ``hashalg``. Cookies generated by different instances of AuthTktAuthenticationPolicy using different ``hashalg`` options are not compatible. Switching the ``hashalg`` will imply that all existing users with a valid cookie will be required to re-login. Optional. ``debug`` Default: ``False``. If ``debug`` is ``True``, log messages to the Pyramid debug logger about the results of various authentication steps. The output from debugging is useful for reporting to maillist or IRC channels when asking for support. Optional. ``samesite`` Default: ``'Lax'``. The 'samesite' option of the session cookie. Set the value to ``None`` to turn off the samesite option. Optional. .. versionchanged:: 2.0 The default ``hashalg`` was changed from ``md5`` to ``sha512``. """ parse_ticket = staticmethod(parse_ticket) # for tests AuthTicket = AuthTicket # for tests BadTicket = BadTicket # for tests now = None # for tests userid_type_decoders = { 'int': int, 'unicode': lambda x: utf_8_decode(x)[0], # bw compat for old cookies 'b64unicode': lambda x: utf_8_decode(b64decode(x))[0], 'b64str': lambda x: b64decode(x), } userid_type_encoders = { int: ('int', str), str: ('b64unicode', lambda x: b64encode(utf_8_encode(x)[0])), bytes: ('b64str', lambda x: b64encode(x)), } def __init__( self, secret, cookie_name='auth_tkt', secure=False, include_ip=False, timeout=None, reissue_time=None, max_age=None, http_only=False, path="/", wild_domain=True, hashalg='sha512', parent_domain=False, domain=None, samesite='Lax', ): self.cookie_profile = CookieProfile( cookie_name=cookie_name, secure=secure, max_age=max_age, httponly=http_only, path=path, serializer=SimpleSerializer(), samesite=samesite, ) self.secret = secret self.cookie_name = cookie_name self.secure = secure self.include_ip = include_ip self.timeout = timeout if timeout is None else int(timeout) self.reissue_time = ( reissue_time if reissue_time is None else int(reissue_time) ) self.max_age = max_age if max_age is None else int(max_age) self.wild_domain = wild_domain self.parent_domain = parent_domain self.domain = domain self.hashalg = hashalg def _get_cookies(self, request, value, max_age=None): if self.domain: domain = self.domain else: cur_domain = request.domain if self.parent_domain and cur_domain.count('.') > 1: domain = cur_domain.split('.', 1)[1] elif self.wild_domain: domain = cur_domain else: domain = None profile = self.cookie_profile(request) kw = {'domains': [domain]} if max_age is not None: kw['max_age'] = max_age headers = profile.get_headers(value, **kw) return headers def identify(self, request): """Return a dictionary with authentication information, or ``None`` if no valid auth_tkt is attached to ``request``""" environ = request.environ cookie = request.cookies.get(self.cookie_name) if cookie is None: return None if self.include_ip: remote_addr = environ['REMOTE_ADDR'] else: remote_addr = '0.0.0.0' try: timestamp, userid, tokens, user_data = self.parse_ticket( self.secret, cookie, remote_addr, self.hashalg ) except self.BadTicket: return None now = self.now # service tests if now is None: now = time_mod.time() if self.timeout and ((timestamp + self.timeout) < now): # the auth_tkt data has expired return None userid_typename = 'userid_type:' user_data_info = 
user_data.split('|') for datum in filter(None, user_data_info): if datum.startswith(userid_typename): userid_type = datum[len(userid_typename) :] decoder = self.userid_type_decoders.get(userid_type) if decoder: userid = decoder(userid) reissue = self.reissue_time is not None if reissue and not hasattr(request, '_authtkt_reissued'): if (now - timestamp) > self.reissue_time: # See https://github.com/Pylons/pyramid/issues#issue/108 tokens = list(filter(None, tokens)) headers = self.remember( request, userid, max_age=self.max_age, tokens=tokens ) def reissue_authtkt(request, response): if not hasattr(request, '_authtkt_reissue_revoked'): for k, v in headers: response.headerlist.append((k, v)) request.add_response_callback(reissue_authtkt) request._authtkt_reissued = True environ['REMOTE_USER_TOKENS'] = tokens environ['REMOTE_USER_DATA'] = user_data environ['AUTH_TYPE'] = 'cookie' identity = {} identity['timestamp'] = timestamp identity['userid'] = userid identity['tokens'] = tokens identity['userdata'] = user_data return identity def forget(self, request): """Return a set of expires Set-Cookie headers, which will destroy any existing auth_tkt cookie when attached to a response""" request._authtkt_reissue_revoked = True return self._get_cookies(request, None) def remember(self, request, userid, max_age=None, tokens=()): """Return a set of Set-Cookie headers; when set into a response, these headers will represent a valid authentication ticket. ``max_age`` The max age of the auth_tkt cookie, in seconds. When this value is set, the cookie's ``Max-Age`` and ``Expires`` settings will be set, allowing the auth_tkt cookie to last between browser sessions. If this value is ``None``, the ``max_age`` value provided to the helper itself will be used as the ``max_age`` value. Default: ``None``. ``tokens`` A sequence of strings that will be placed into the auth_tkt tokens field. Each string in the sequence must be of the Python ``str`` type and must match the regex ``^[A-Za-z][A-Za-z0-9+_-]*$``. Tokens are available in the returned identity when an auth_tkt is found in the request and unpacked. Default: ``()``. """ max_age = self.max_age if max_age is None else int(max_age) environ = request.environ if self.include_ip: remote_addr = environ['REMOTE_ADDR'] else: remote_addr = '0.0.0.0' user_data = '' encoding_data = self.userid_type_encoders.get(type(userid)) if encoding_data: encoding, encoder = encoding_data else: warnings.warn( "userid is of type {}, and is not supported by the " "AuthTktAuthenticationPolicy. Explicitly converting to string " "and storing as base64. 
Subsequent requests will receive a " "string as the userid, it will not be decoded back to the " "type provided.".format(type(userid)), RuntimeWarning, ) encoding, encoder = self.userid_type_encoders.get(str) userid = str(userid) userid = encoder(userid) user_data = 'userid_type:%s' % encoding new_tokens = [] for token in tokens: if isinstance(token, str): try: token = ascii_(token) except UnicodeEncodeError: raise ValueError(f"Invalid token {token!r}") if not (isinstance(token, str) and VALID_TOKEN.match(token)): raise ValueError(f"Invalid token {token!r}") new_tokens.append(token) tokens = tuple(new_tokens) if hasattr(request, '_authtkt_reissued'): request._authtkt_reissue_revoked = True ticket = self.AuthTicket( self.secret, userid, remote_addr, tokens=tokens, user_data=user_data, cookie_name=self.cookie_name, secure=self.secure, hashalg=self.hashalg, ) cookie_value = ticket.cookie_value() return self._get_cookies(request, cookie_value, max_age) @implementer(IAuthenticationPolicy)
AuthTktCookieHelper
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/links/test_batch.py
{ "start": 2140, "end": 2977 }
class ____(BaseAwsLinksTestCase): link_class = BatchJobDetailsLink def test_extra_link(self, mock_supervisor_comms): if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms: mock_supervisor_comms.send.return_value = XComResult( key=self.link_class.key, value={ "region_name": "cn-north-1", "aws_domain": self.link_class.get_aws_domain("aws-cn"), "aws_partition": "aws-cn", "job_id": "fake-id", }, ) self.assert_extra_link_url( expected_url="https://console.amazonaws.cn/batch/home?region=cn-north-1#jobs/detail/fake-id", region_name="cn-north-1", aws_partition="aws-cn", job_id="fake-id", )
TestBatchJobDetailsLink
python
aimacode__aima-python
logic4e.py
{ "start": 31810, "end": 32561 }
class ____: def __init__(self, x, y, orientation): self.X = x self.Y = y self.orientation = orientation def get_location(self): return self.X, self.Y def set_location(self, x, y): self.X = x self.Y = y def get_orientation(self): return self.orientation def set_orientation(self, orientation): self.orientation = orientation def __eq__(self, other): if (other.get_location() == self.get_location() and other.get_orientation() == self.get_orientation()): return True else: return False # ______________________________________________________________________________ # 7.7.2 A hybrid agent
WumpusPosition
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
{ "start": 31819, "end": 32186 }
class ____(graphene.Mutation): """Store whether we've shown the nux to any user and they've dismissed or submitted it.""" Output = graphene.NonNull(graphene.Boolean) class Meta: name = "SetNuxSeenMutation" @capture_error def mutate(self, _graphene_info): set_nux_seen() return get_has_seen_nux()
GrapheneSetNuxSeenMutation
python
kamyu104__LeetCode-Solutions
Python/cracking-the-safe.py
{ "start": 33, "end": 609 }
class ____(object): def crackSafe(self, n, k): """ :type n: int :type k: int :rtype: str """ M = k**(n-1) P = [q*k+i for i in xrange(k) for q in xrange(M)] # rotate: i*k^(n-1) + q => q*k + i result = [str(k-1)]*(n-1) for i in xrange(k**n): j = i # concatenation in lexicographic order of Lyndon words while P[j] >= 0: result.append(str(j//M)) P[j], j = -1, P[j] return "".join(result) # Time: O(k^n) # Space: O(k^n)
Solution
python
redis__redis-py
tests/test_asyncio/test_multidb/test_failure_detector.py
{ "start": 412, "end": 5190 }
class ____: @pytest.mark.asyncio @pytest.mark.parametrize( "min_num_failures,failure_rate_threshold,circuit_state", [ (2, 0.4, CBState.OPEN), (2, 0, CBState.OPEN), (0, 0.4, CBState.OPEN), (3, 0.4, CBState.CLOSED), (2, 0.41, CBState.CLOSED), ], ids=[ "exceeds min num failures AND failures rate", "exceeds min num failures AND failures rate == 0", "min num failures == 0 AND exceeds failures rate", "do not exceeds min num failures", "do not exceeds failures rate", ], ) async def test_failure_detector_correctly_reacts_to_failures( self, min_num_failures, failure_rate_threshold, circuit_state ): fd = FailureDetectorAsyncWrapper( CommandFailureDetector(min_num_failures, failure_rate_threshold) ) mock_db = Mock(spec=Database) mock_db.circuit.state = CBState.CLOSED mock_ce = Mock(spec=AsyncCommandExecutor) mock_ce.active_database = mock_db fd.set_command_executor(mock_ce) await fd.register_command_execution(("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) assert mock_db.circuit.state == circuit_state @pytest.mark.asyncio @pytest.mark.parametrize( "min_num_failures,failure_rate_threshold", [ (3, 0.0), (3, 0.6), ], ids=[ "do not exceeds min num failures, during interval", "do not exceeds min num failures AND failure rate, during interval", ], ) async def test_failure_detector_do_not_open_circuit_on_interval_exceed( self, min_num_failures, failure_rate_threshold ): fd = FailureDetectorAsyncWrapper( CommandFailureDetector(min_num_failures, failure_rate_threshold, 0.3) ) mock_db = Mock(spec=Database) mock_db.circuit.state = CBState.CLOSED mock_ce = Mock(spec=AsyncCommandExecutor) mock_ce.active_database = mock_db fd.set_command_executor(mock_ce) assert mock_db.circuit.state == CBState.CLOSED await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) await asyncio.sleep(0.16) await fd.register_command_execution(("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) await asyncio.sleep(0.16) await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) assert mock_db.circuit.state == CBState.CLOSED # 2 more failure as last one already refreshed timer await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) await fd.register_command_execution(("GET", "key")) await fd.register_failure(Exception(), ("GET", "key")) assert mock_db.circuit.state == CBState.OPEN @pytest.mark.asyncio async def test_failure_detector_open_circuit_on_specific_exception_threshold_exceed( self, ): fd = FailureDetectorAsyncWrapper( CommandFailureDetector(5, 1, error_types=[ConnectionError]) ) mock_db = Mock(spec=Database) mock_db.circuit.state = CBState.CLOSED mock_ce = Mock(spec=AsyncCommandExecutor) mock_ce.active_database = mock_db fd.set_command_executor(mock_ce) assert mock_db.circuit.state == CBState.CLOSED await fd.register_failure(Exception(), ("SET", "key1", "value1")) await fd.register_failure(ConnectionError(), ("SET", "key1", "value1")) await fd.register_failure(ConnectionError(), ("SET", "key1", "value1")) await fd.register_failure(Exception(), ("SET", "key1", "value1")) await 
fd.register_failure(Exception(), ("SET", "key1", "value1")) assert mock_db.circuit.state == CBState.CLOSED await fd.register_failure(ConnectionError(), ("SET", "key1", "value1")) await fd.register_failure(ConnectionError(), ("SET", "key1", "value1")) await fd.register_failure(ConnectionError(), ("SET", "key1", "value1")) assert mock_db.circuit.state == CBState.OPEN
TestFailureDetectorAsyncWrapper
python
more-itertools__more-itertools
tests/test_more.py
{ "start": 174251, "end": 175872 }
class ____(TestCase): def test_r_less_than_n(self): iterable = 'abcdefg' r = 4 first_index = {} for index, element in enumerate(permutations(iterable, r)): actual = mi.permutation_index(element, iterable) expected = first_index.setdefault(element, index) self.assertEqual(actual, expected) def test_r_equal_to_n(self): iterable = 'abcd' first_index = {} for index, element in enumerate(permutations(iterable)): actual = mi.permutation_index(element, iterable) expected = first_index.setdefault(element, index) self.assertEqual(actual, expected) def test_multiplicity(self): iterable = 'abacba' r = 3 first_index = {} for index, element in enumerate(permutations(iterable, r)): actual = mi.permutation_index(element, iterable) expected = first_index.setdefault(element, index) self.assertEqual(actual, expected) def test_null(self): actual = mi.permutation_index(tuple(), []) expected = 0 self.assertEqual(actual, expected) def test_long(self): actual = mi.permutation_index((2, 12, 35, 126), range(180)) expected = 11631678 self.assertEqual(actual, expected) def test_invalid_large(self): with self.assertRaises(ValueError): mi.permutation_index(tuple('abcdefg'), 'abcdef') def test_invalid_match(self): with self.assertRaises(ValueError): mi.permutation_index(tuple('axe'), 'abcde')
PermutationIndexTests
python
mitmproxy__pdoc
test/test_snapshot.py
{ "start": 301, "end": 7433 }
class ____: id: str specs: list[str] render_options: dict with_output_directory: bool min_version: tuple[int, int] warnings: list[str] def __init__( self, id: str, specs: list[str] | None = None, render_options: dict | None = None, with_output_directory: bool = False, min_version: tuple[int, int] = (3, 7), warnings: list[str] | None = None, ): self.id = id self.specs = specs or [f"{id}.py"] self.render_options = render_options or {} self.with_output_directory = with_output_directory self.min_version = min_version self.warnings = warnings or [] def __repr__(self): return f"Snapshot({self.id})" def make(self, format: str) -> str: with ExitStack() as stack: if format != "repr": for w in self.warnings: stack.enter_context(pytest.warns(match=w)) pdoc.render.configure(**self.render_options) pdoc.render.env.globals["__version__"] = "$VERSION" if self.with_output_directory: if format == "repr": return "(skipped)" with tempfile.TemporaryDirectory() as tmpdirname: tmpdir = Path(tmpdirname) # noinspection PyTypeChecker pdoc.pdoc(*self.specs, output_directory=Path(tmpdir)) # type: ignore rendered = "<style>iframe {width: 100%; min-height: 50vh}</style>\n" for f in sorted(tmpdir.glob("**/*"), reverse=True): if not f.is_file(): continue rendered += ( f"<h3>{f.relative_to(tmpdir).as_posix()}</h3>\n" + '<iframe srcdoc="\n' + f.read_text("utf8") .replace("&", "&amp;") .replace('"', "&quot;") + '\n"></iframe>\n\n' ) else: if format == "repr": mod_name = pdoc.extract.walk_specs(self.specs)[0] mod = pdoc.doc.Module.from_name(mod_name) with pdoc.extract.mock_some_common_side_effects(): rendered = pdoc.render.repr_module(mod) else: rendered = pdoc.pdoc(*self.specs) pdoc.render.configure() pdoc.render.env.globals["__version__"] = pdoc.__version__ return rendered def outfile(self, format: str) -> Path: return (snapshot_dir / self.id).with_suffix( { "html": ".html", "repr": ".txt", }[format] ) snapshots = [ Snapshot("ast_parsing"), Snapshot("collections_abc"), Snapshot("demo"), Snapshot("enums", min_version=(3, 13)), Snapshot("flavors_google"), Snapshot("flavors_numpy"), Snapshot("flavors_rst"), Snapshot( "example_customtemplate", ["demo.py"], {"template_directory": here / ".." / "examples" / "custom-template"}, ), Snapshot( "example_darkmode", ["demo.py"], {"template_directory": here / ".." / "examples" / "dark-mode"}, ), Snapshot( "example_mkdocs", ["demo.py"], {"template_directory": here / ".." 
/ "examples" / "mkdocs" / "pdoc-template"}, ), Snapshot("demo_long"), Snapshot("demo_eager"), Snapshot("demopackage", ["demopackage", "!demopackage.child_excluded"]), Snapshot( "demopackage_dir", ["demopackage", "demopackage2", "!demopackage.child_excluded"], render_options={ "edit_url_map": { "demopackage.child_b": "https://gitlab.example.com/foo/bar/-/blob/main/demopackage/child_b", "demopackage.child_c": "https://custom.example.com/demopackage/child_c", "demopackage": "https://github.com/mitmproxy/pdoc/tree/main/test/testdata/demopackage/", } }, with_output_directory=True, ), Snapshot("misc"), Snapshot("misc_py310", min_version=(3, 10)), Snapshot("misc_py312", min_version=(3, 12)), Snapshot("misc_py313", min_version=(3, 13)), Snapshot("misc_py313", min_version=(3, 14)), Snapshot("math_demo", render_options={"math": True}), Snapshot("math_misc", render_options={"math": True}), Snapshot("mermaid_demo", render_options={"mermaid": True}), Snapshot( "render_options", ["render_options", "math_demo"], render_options={ "show_source": False, "logo": "https://placedog.net/500?random", "logo_link": "https://example.com/", "footer_text": "custom footer text", "search": False, "favicon": "https://pdoc.dev/favicon.svg", }, with_output_directory=True, ), Snapshot("pyo3_sample_library", specs=["pdoc_pyo3_sample_library"]), Snapshot("top_level_reimports", ["top_level_reimports"]), Snapshot("type_checking_imports", ["type_checking_imports.main"]), Snapshot("typed_dict", min_version=(3, 13)), Snapshot("type_stubs", ["type_stubs"], min_version=(3, 10)), Snapshot( "visibility", render_options={ "include_undocumented": False, }, ), Snapshot("with_pydantic"), ] @pytest.mark.parametrize("snapshot", snapshots, ids=[x.id for x in snapshots]) @pytest.mark.parametrize("format", ["html", "repr"]) def test_snapshots(snapshot: Snapshot, format: str, monkeypatch): """ Compare pdoc's rendered output against stored snapshots. """ monkeypatch.chdir(snapshot_dir) monkeypatch.setattr(pdoc.search, "node_executable", lambda: None) if sys.version_info < snapshot.min_version: pytest.skip( f"Snapshot only works on Python {'.'.join(str(x) for x in snapshot.min_version)} and above." ) expected = snapshot.outfile(format).read_text("utf8") actual = snapshot.make(format) assert actual == expected, ( f"Rendered output does not match for snapshot {snapshot.id}. " "Run `python3 ./test/test_snapshot.py` to update snapshots." ) if __name__ == "__main__": warnings.simplefilter("error") pdoc.search.node_executable = lambda: None # type: ignore os.chdir(snapshot_dir) skipped_some = False for snapshot in snapshots: if len(sys.argv) > 1 and snapshot.id not in sys.argv: continue if sys.version_info < snapshot.min_version: print( f"Skipping {snapshot} as it requires a more recent version of Python." ) skipped_some = True continue for format in ["html", "repr"]: print(f"Rendering {snapshot} to {format}...") rendered = snapshot.make(format) snapshot.outfile(format).write_bytes(rendered.encode()) print("All snapshots rendered!") sys.exit(int(skipped_some))
Snapshot
python
pennersr__django-allauth
allauth/socialaccount/providers/battlenet/views.py
{ "start": 2059, "end": 4749 }
class ____(OAuth2Adapter): """ OAuth2 adapter for Battle.net https://dev.battle.net/docs/read/oauth Region is set to us by default, but can be overridden with the `region` GET parameter when performing a login. Can be any of eu, us, kr, sea, tw or cn """ provider_id = "battlenet" valid_regions = ( Region.APAC, Region.CN, Region.EU, Region.KR, Region.SEA, Region.TW, Region.US, ) @property def battlenet_region(self): # Check by URI query parameter first. region = self.request.GET.get("region", "").lower() if region == Region.SEA: # South-East Asia uses the same region as US everywhere return Region.US if region in self.valid_regions: return region # Second, check the provider settings. region = ( getattr(settings, "SOCIALACCOUNT_PROVIDERS", {}) .get("battlenet", {}) .get("REGION", "us") ) if region in self.valid_regions: return region return Region.US @property def battlenet_base_url(self): region = self.battlenet_region if region == Region.CN: return "https://oauth.battlenet.com.cn" return "https://oauth.battle.net" @property def access_token_url(self): return self.battlenet_base_url + "/token" @property def authorize_url(self): return self.battlenet_base_url + "/authorize" @property def profile_url(self): return self.battlenet_base_url + "/userinfo" def complete_login(self, request, app, token, **kwargs): response = ( get_adapter() .get_requests_session() .get( self.profile_url, headers={"authorization": "Bearer %s" % (token.token)}, ) ) data = _check_errors(response) # Add the region to the data so that we can have it in `extra_data`. data["region"] = self.battlenet_region return self.get_provider().sociallogin_from_response(request, data) def get_callback_url(self, request, app): r = super(BattleNetOAuth2Adapter, self).get_callback_url(request, app) region = request.GET.get("region", "").lower() # Pass the region down to the callback URL if we specified it if region and region in self.valid_regions: r += "?region=%s" % (region) return r oauth2_login = OAuth2LoginView.adapter_view(BattleNetOAuth2Adapter) oauth2_callback = OAuth2CallbackView.adapter_view(BattleNetOAuth2Adapter)
BattleNetOAuth2Adapter
python
sphinx-doc__sphinx
sphinx/ext/autodoc/_legacy_class_based/_documenters.py
{ "start": 42451, "end": 43770 }
class ____(Documenter): """Specialized Documenter subclass for objects on class level (methods, attributes). """ def resolve_name( self, modname: str | None, parents: Any, path: str, base: str ) -> tuple[str | None, list[str]]: if modname is not None: return modname, [*parents, base] if path: mod_cls = path.rstrip('.') else: # if documenting a class-level object without path, # there must be a current class, either from a parent # auto directive ... mod_cls = self._current_document.autodoc_class # ... or from a class directive if not mod_cls: mod_cls = self.env.ref_context.get('py:class', '') # ... if still falsy, there's no way to know if not mod_cls: return None, [] modname, _sep, cls = mod_cls.rpartition('.') parents = [cls] # if the module name is still missing, get it like above if not modname: modname = self._current_document.autodoc_module if not modname: modname = self.env.ref_context.get('py:module') # ... else, it stays None, which means invalid return modname, [*parents, base]
ClassLevelDocumenter
python
astropy__astropy
astropy/extern/ply/yacc.py
{ "start": 10276, "end": 53858 }
class ____: def __init__(self, lrtab, errorf): self.productions = lrtab.lr_productions self.action = lrtab.lr_action self.goto = lrtab.lr_goto self.errorfunc = errorf self.set_defaulted_states() self.errorok = True def errok(self): self.errorok = True def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) # Defaulted state support. # This method identifies parser states where there is only one possible reduction action. # For such states, the parser can make a choose to make a rule reduction without consuming # the next look-ahead token. This delayed invocation of the tokenizer can be useful in # certain kinds of advanced parsing situations where the lexer and parser interact with # each other or change states (i.e., manipulation of scope, lexer states, etc.). # # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions def set_defaulted_states(self): self.defaulted_states = {} for state, actions in self.action.items(): rules = list(actions.values()) if len(rules) == 1 and rules[0] < 0: self.defaulted_states[state] = rules[0] def disable_defaulted_states(self): self.defaulted_states = {} def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): if debug or yaccdevel: if isinstance(debug, int): debug = PlyLogger(sys.stderr) return self.parsedebug(input, lexer, debug, tracking, tokenfunc) elif tracking: return self.parseopt(input, lexer, debug, tracking, tokenfunc) else: return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parsedebug(). # # This is the debugging enabled version of parse(). All changes made to the # parsing engine should be made here. Optimized versions of this function # are automatically created by the ply/ygen.py script. This script cuts out # sections enclosed in markers such as this: # # #--! DEBUG # statements # #--! DEBUG # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parsedebug-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery #--! DEBUG debug.info('PLY: PARSE DEBUG START') #--! DEBUG # If no lexer was given, we will try to use the lex module if not lexer: from . 
import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer #--! DEBUG debug.debug('') debug.debug('State : %s', state) #--! DEBUG if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] #--! DEBUG debug.debug('Defaulted state %s: Reduce using %d', state, -t) #--! DEBUG #--! DEBUG debug.debug('Stack : %s', ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t #--! DEBUG debug.debug('Action : Shift and goto state %s', t) #--! DEBUG symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None #--! DEBUG if plen: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', goto[statestack[-1-plen]][pname]) else: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], goto[statestack[-1]][pname]) #--! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) #--! DEBUG debug.info('Done : Returning %s', format_result(result)) debug.info('PLY: PARSE DEBUG END') #--! DEBUG return result if t is None: #--! DEBUG debug.error('Error : %s', ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. 
Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parsedebug-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt(). # # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! # This code is automatically generated by the ply/ygen.py script. Make # changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. 
Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! 
if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt_notrack(). # # Optimized version of parseopt() with line number tracking removed. # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated # by the ply/ygen.py script. Make changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-notrack-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . 
import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-notrack-end # ----------------------------------------------------------------------------- # === Grammar Representation === # # The following functions, classes, and variables are used to represent and # manipulate the rules that make up a grammar. # ----------------------------------------------------------------------------- # regex matching identifiers _is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') # ----------------------------------------------------------------------------- # class Production: # # This class stores the raw information about a single production or grammar rule. # A grammar rule refers to a specification such as this: # # expr : expr PLUS term # # Here are the basic attributes defined on all productions # # name - Name of the production. 
For example 'expr' # prod - A list of symbols on the right side ['expr','PLUS','term'] # prec - Production precedence level # number - Production number. # func - Function that executes on reduce # file - File where production function is defined # lineno - Line number where production function is defined # # The following attributes are defined or optional. # # len - Length of the production (number of symbols on right hand side) # usyms - Set of unique symbols found in the production # -----------------------------------------------------------------------------
LRParser
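# Illustrative sketch (not part of the record above): a minimal grammar that
# exercises the parse() loop and the p_error() recovery hook described in the
# engine's comments. Assumes only that the `ply` package is installed; the
# token names and rules here are hypothetical.
import ply.lex as lex
import ply.yacc as yacc

tokens = ("NUMBER", "PLUS")
t_PLUS = r"\+"
t_ignore = " "

def t_NUMBER(t):
    r"\d+"
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_expr_plus(p):
    "expr : expr PLUS NUMBER"
    p[0] = p[1] + p[3]

def p_expr_number(p):
    "expr : NUMBER"
    p[0] = p[1]

def p_error(p):
    # Invoked once per syntax error, as the error-recovery comments explain.
    print("Syntax error at", p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse("1 + 2 + 3", lexer=lexer))  # -> 6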
python
huggingface__transformers
examples/modular-transformers/modeling_dummy_bert.py
{ "start": 5644, "end": 8771 }
class ____(nn.Module): def __init__(self, config, is_causal=False, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.scaling = self.attention_head_size**-0.5 self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.is_causal = is_causal self.layer_idx = layer_idx def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.attention_head_size) # get all proj query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2) key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2) if past_key_value is not None: # decoder-only dummy_bert can have a simple dynamic cache for example current_past_key_value = past_key_value if isinstance(past_key_value, EncoderDecoderCache): current_past_key_value = past_key_value.self_attention_cache # save all key/value_layer to cache to be re-used for fast auto-regressive generation key_layer, value_layer = current_past_key_value.update( key_layer, value_layer, self.layer_idx, {"cache_position": cache_position}, ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() return attn_output, attn_weights
DummyBertSelfAttention
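# Illustrative sketch (not part of the record above): the eager scaled
# dot-product attention that the layer's `scaling = attention_head_size ** -0.5`
# and transpose/reshape pattern implement. Shapes are hypothetical; assumes
# only that torch is installed.
import torch

batch, heads, seq, head_dim = 2, 4, 8, 16
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)
v = torch.randn(batch, heads, seq, head_dim)

scores = (q @ k.transpose(-2, -1)) * head_dim ** -0.5   # (batch, heads, seq, seq)
weights = scores.softmax(dim=-1)
context = weights @ v                                    # (batch, heads, seq, head_dim)

# Merge heads back, mirroring `attn_output.reshape(*input_shape, -1)`.
output = context.transpose(1, 2).reshape(batch, seq, heads * head_dim)
print(output.shape)  # torch.Size([2, 8, 64])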
python
kamyu104__LeetCode-Solutions
Python/watering-plants-ii.py
{ "start": 29, "end": 803 }
class ____(object):
    def minimumRefill(self, plants, capacityA, capacityB):
        """
        :type plants: List[int]
        :type capacityA: int
        :type capacityB: int
        :rtype: int
        """
        result = 0
        left, right = 0, len(plants)-1
        canA, canB = capacityA, capacityB
        while left < right:
            if canA < plants[left]:
                result += 1
                canA = capacityA
            canA -= plants[left]
            if canB < plants[right]:
                result += 1
                canB = capacityB
            canB -= plants[right]
            left, right = left+1, right-1
        if left == right:
            if max(canA, canB) < plants[left]:
                result += 1
        return result
Solution
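# Illustrative usage (not part of the record above), assuming the masked class
# above is bound to its target name `Solution`. Input is LeetCode 2105's first
# sample case.
plants = [2, 2, 3, 3]
print(Solution().minimumRefill(plants, 5, 5))  # -> 1 (Bob refills once before plants[2])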
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 211435, "end": 212433 }
class ____(GeneratedAirbyteSource):
    @public
    def __init__(
        self, name: str, application_id: str, application_secret: str, token: str, start_date: str
    ):
        """Airbyte Source for Linnworks.

        Documentation can be found at https://docs.airbyte.com/integrations/sources/linnworks

        Args:
            name (str): The name of the destination.
            application_id (str): Linnworks Application ID
            application_secret (str): Linnworks Application Secret
            start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
        """
        self.application_id = check.str_param(application_id, "application_id")
        self.application_secret = check.str_param(application_secret, "application_secret")
        self.token = check.str_param(token, "token")
        self.start_date = check.str_param(start_date, "start_date")
        super().__init__("Linnworks", name)
LinnworksSource
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 25126, "end": 25415 }
class ____(Interface):
    """A utility which generates a response"""

    def __call__(request):
        """Return a response object implementing IResponse,
        e.g. :class:`pyramid.response.Response`).

        It should handle the case when ``request`` is ``None``."""
IResponseFactory
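# Illustrative sketch (not part of the record above): a factory satisfying the
# interface's contract, assuming pyramid is installed. How the factory gets
# registered with the configurator is out of scope here; this only shows the
# callable contract, including the `request is None` case.
from pyramid.response import Response

def response_factory(request):
    # The contract requires working even when no request is active.
    return Response()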
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/initsubclass1.py
{ "start": 428, "end": 542 }
class ____(ClassA, param1=0, param2=4):
    pass


# This should generate two errors because param2 is missing.
ClassB
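# Illustrative sketch (not part of the record above): how a base class can
# require keyword arguments at class-creation time via __init_subclass__. The
# real ClassA from the test sample is not shown in the record, so this is an
# assumed, simplified equivalent.
class Base:
    def __init_subclass__(cls, *, param1: int, param2: int, **kwargs) -> None:
        super().__init_subclass__(**kwargs)
        cls.param1 = param1
        cls.param2 = param2

class Child(Base, param1=0, param2=4):
    pass

print(Child.param1, Child.param2)  # 0 4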
python
google__jax
jax/experimental/mosaic/gpu/core.py
{ "start": 8835, "end": 8986 }
class ____:
  collective_dims: Sequence[gpu.Dimension]
  arrival_count: int = 1
  num_barriers: int = 1


@dataclasses.dataclass(frozen=True)
ClusterBarrier
python
PrefectHQ__prefect
src/integrations/prefect-snowflake/prefect_snowflake/credentials.py
{ "start": 1115, "end": 1193 }
class ____(Exception):
    """Invalid PEM Format Certificate"""
InvalidPemFormat
python
mwaskom__seaborn
seaborn/_core/plot.py
{ "start": 4222, "end": 6223 }
class ____(mpl.RcParams): """ Configuration object for the Plot.theme, using matplotlib rc parameters. """ THEME_GROUPS = [ "axes", "figure", "font", "grid", "hatch", "legend", "lines", "mathtext", "markers", "patch", "savefig", "scatter", "xaxis", "xtick", "yaxis", "ytick", ] def __init__(self): super().__init__() self.reset() @property def _default(self) -> dict[str, Any]: return { **self._filter_params(mpl.rcParamsDefault), **axes_style("darkgrid"), **plotting_context("notebook"), "axes.prop_cycle": cycler("color", color_palette("deep")), } def reset(self) -> None: """Update the theme dictionary with seaborn's default values.""" self.update(self._default) def update(self, other: dict[str, Any] | None = None, /, **kwds): """Update the theme with a dictionary or keyword arguments of rc parameters.""" if other is not None: theme = self._filter_params(other) else: theme = {} theme.update(kwds) super().update(theme) def _filter_params(self, params: dict[str, Any]) -> dict[str, Any]: """Restruct to thematic rc params.""" return { k: v for k, v in params.items() if any(k.startswith(p) for p in self.THEME_GROUPS) } def _html_table(self, params: dict[str, Any]) -> list[str]: lines = ["<table>"] for k, v in params.items(): row = f"<tr><td>{k}:</td><td style='text-align:left'>{v!r}</td></tr>" lines.append(row) lines.append("</table>") return lines def _repr_html_(self) -> str: repr = [ "<div style='height: 300px'>", "<div style='border-style: inset; border-width: 2px'>", *self._html_table(self), "</div>", "</div>", ] return "\n".join(repr)
ThemeConfig
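# Illustrative sketch (not part of the record above): the prefix filtering that
# _filter_params performs over matplotlib rc parameters, assuming matplotlib is
# installed. The group list here is a shortened, hypothetical subset of
# THEME_GROUPS.
import matplotlib as mpl

groups = ("axes", "font", "grid", "legend", "lines", "xtick", "ytick")
thematic = {
    k: v for k, v in mpl.rcParamsDefault.items()
    if any(k.startswith(p) for p in groups)
}
print(len(thematic), "of", len(mpl.rcParamsDefault), "rc params are thematic")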
python
django__django
django/db/models/lookups.py
{ "start": 28249, "end": 28305 }
class ____(UUIDTextMixin, IEndsWith):
    pass
UUIDIEndsWith
python
django__django
tests/model_forms/models.py
{ "start": 10079, "end": 10201 }
class ____(models.Model):
    name = models.CharField(max_length=10)
    markup = MarkupField()
CustomFieldForExclusionModel
python
neetcode-gh__leetcode
python/0605-can-place-flowers.py
{ "start": 420, "end": 1119 }
class ____:
    def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
        # Another solution with O(1) space complexity
        for i in range(len(flowerbed)):
            if n == 0:
                return True
            if ((i == 0 or flowerbed[i - 1] == 0)  # If at the first element or the previous element equals to 0
                    and (flowerbed[i] == 0)  # If current element equals to 0
                    and (i == len(flowerbed) - 1 or flowerbed[i + 1] == 0)):  # If at the last element or the next element equals to 0
                # Place flower at the current position
                flowerbed[i] = 1
                n -= 1
        return n == 0
Solution2
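# Illustrative usage (not part of the record above), assuming the masked class
# above is bound to its target name `Solution2`. Inputs are LeetCode 605's
# sample cases; note the method mutates the flowerbed list it is given.
print(Solution2().canPlaceFlowers([1, 0, 0, 0, 1], 1))  # True
print(Solution2().canPlaceFlowers([1, 0, 0, 0, 1], 2))  # False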
python
google__pytype
pytype/overlays/fiddle_overlay.py
{ "start": 9976, "end": 13228 }
class ____(Buildable): """An instantiation of a fiddle.Partial with a particular template.""" def __init__(self, *args, **kwargs): super().__init__("Partial", *args, **kwargs) def _convert_type(typ, subst, ctx): """Helper function for recursive type conversion of fields.""" if isinstance(typ, abstract.TypeParameter) and typ.name in subst: # TODO(mdemello): Handle typevars in unions. typ = subst[typ.name] new_typ = BuildableType.make("Config", typ, ctx, module="fiddle") return abstract.Union([new_typ, typ], ctx) def _make_fields(typ, ctx): """Helper function for recursive type conversion of fields.""" if isinstance(typ, abstract.ParameterizedClass): subst = typ.formal_type_parameters typ = typ.base_cls else: subst = {} if abstract_utils.is_dataclass(typ): fields = [ classgen.Field(x.name, _convert_type(x.typ, subst, ctx), x.default) for x in typ.metadata["__dataclass_fields__"] ] return fields return [] def make_instance( subclass_name: str, underlying: abstract.Class, node, ctx ) -> tuple[Node, abstract.BaseValue]: """Generate a Buildable instance from an underlying template class.""" subclass_name = _CLASS_ALIASES[subclass_name] if subclass_name not in ("Config", "Partial"): raise ValueError(f"Unexpected instance class: {subclass_name}") # We include the root node in case the cache is shared between multiple runs. cache_key = (ctx.root_node, underlying, subclass_name) if cache_key in _INSTANCE_CACHE: return node, _INSTANCE_CACHE[cache_key] _INSTANCE_CACHE[cache_key] = ctx.convert.unsolvable # recursion handling instance_class = {"Config": Config, "Partial": Partial}[subclass_name] # Create the specialized class Config[underlying] or Partial[underlying] try: cls = BuildableType.make(subclass_name, underlying, ctx, module="fiddle") except KeyError: # We are in the middle of constructing the fiddle ast; fiddle.Config doesn't # exist yet return node, ctx.convert.unsolvable # Now create the instance, setting its class to `cls` obj = instance_class(cls, ctx) obj.underlying = underlying fields = _make_fields(underlying, ctx) for f in fields: obj.members[f.name] = f.typ.instantiate(node) # Add a per-instance annotations dict so setattr can be typechecked. obj.members["__annotations__"] = classgen.make_annotations_dict( fields, node, ctx ) _INSTANCE_CACHE[cache_key] = obj return node, obj def is_fiddle_buildable_pytd(cls: pytd.Class) -> bool: # We need the awkward check for the full name because while fiddle reexports # the class as fiddle.Config, we expand that in inferred pyi files to # fiddle._src.config.Config fiddle = re.fullmatch(r"fiddle\.(.+\.)?(Config|Partial)", cls.name) pax = re.fullmatch(r"(.+\.)?pax_fiddle.(Pax)?(Config|Partial)", cls.name) return bool(fiddle or pax) def get_fiddle_buildable_subclass(cls: pytd.Class) -> str: if re.search(r"\.(Pax)?Config$", cls.name): return "Config" if re.search(r"\.(Pax)?Partial$", cls.name): return "Partial" raise ValueError( f"Unexpected {cls.name} when computing fiddle Buildable " "subclass; allowed suffixes are `.Config`, and `.Partial`." )
Partial
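# Illustrative sketch (not part of the record above) of the runtime objects this
# overlay types, assuming the `fiddle` package is installed; the dataclass here
# is hypothetical.
import dataclasses
import fiddle as fdl

@dataclasses.dataclass
class Optimizer:
    learning_rate: float = 0.1

cfg = fdl.Config(Optimizer, learning_rate=0.01)  # dataclass fields become Config attributes
cfg.learning_rate = 0.001                        # the setattr the overlay typechecks
opt = fdl.build(cfg)
print(opt.learning_rate)  # 0.001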
python
pyodide__pyodide
src/py/pyodide/console.py
{ "start": 7366, "end": 9047 }
class ____(Future[Any]): # TODO: Figure out proper SKIPIF syntax for Firefox and Safari """ A future with extra fields used as the return value for :py:class:`Console` APIs. Example: >>> from pyodide.console import Console # doctest: +SKIP >>> console = Console() # doctest: +SKIP >>> future = console.push("print('Hello, World!')") # doctest: +SKIP >>> print(future.syntax_check) # doctest: +SKIP complete # doctest: +SKIP >>> import asyncio # doctest: +SKIP >>> result = asyncio.run(future) # doctest: +SKIP Hello, World! # doctest: +SKIP """ syntax_check: ConsoleFutureStatus """ The status of the future. The values mean the following: :'incomplete': Input is incomplete. The future has already been resolved with result ``None``. :'syntax-error': Input contained a syntax error. The future has been rejected with a ``SyntaxError``. :'complete': The input complete and syntactically correct and asynchronous execution has begun. When the execution is done, the Future will be resolved with the result or rejected with an exception. """ formatted_error: str | None """ If the ``Future`` is rejected, this will be filled with a formatted version of the code. This is a convenience that simplifies code and helps to avoid large memory leaks when using from JavaScript. """ def __init__( self, syntax_check: ConsoleFutureStatus, ): super().__init__() self.syntax_check = syntax_check self.formatted_error = None
ConsoleFuture
python
sanic-org__sanic
tests/benchmark/test_route_resolution_benchmark.py
{ "start": 240, "end": 2214 }
class ____: @mark.asyncio async def test_resolve_route_no_arg_string_path( self, sanic_router, route_generator, benchmark ): simple_routes = route_generator.generate_random_direct_route( max_route_depth=4 ) router, simple_routes = sanic_router(route_details=simple_routes) route_to_call = choice(simple_routes) request = Request( f"/{route_to_call[-1]}".encode(), {"host": "localhost"}, "v1", route_to_call[0], None, None, ) result = benchmark.pedantic( router.get, ( request.path, request.method, request.headers.get("host"), ), iterations=1000, rounds=1000, ) assert await result[1](None) == 1 @mark.asyncio async def test_resolve_route_with_typed_args( self, sanic_router, route_generator, benchmark ): typed_routes = route_generator.add_typed_parameters( route_generator.generate_random_direct_route(max_route_depth=4), max_route_depth=8, ) router, typed_routes = sanic_router(route_details=typed_routes) route_to_call = choice(typed_routes) url = route_generator.generate_url_for_template( template=route_to_call[-1] ) print(f"{route_to_call[-1]} -> {url}") request = Request( f"/{url}".encode(), {"host": "localhost"}, "v1", route_to_call[0], None, None, ) result = benchmark.pedantic( router.get, ( request.path, request.method, request.headers.get("host"), ), iterations=1000, rounds=1000, ) assert await result[1](None) == 1
TestSanicRouteResolution
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 3984, "end": 4310 }
class ____(Rule):
    """integrate(a*f(x), x) -> a*integrate(f(x), x)"""
    constant: Expr
    other: Expr
    substep: Rule

    def eval(self) -> Expr:
        return self.constant * self.substep.eval()

    def contains_dont_know(self) -> bool:
        return self.substep.contains_dont_know()


@dataclass
ConstantTimesRule
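# Illustrative usage (not part of the record above): the behavior this rule
# encodes, integrate(a*f(x), x) -> a*integrate(f(x), x), shown through sympy's
# public manualintegrate entry point (assuming sympy is installed).
from sympy import Symbol, sin
from sympy.integrals.manualintegrate import manualintegrate

x = Symbol("x")
print(manualintegrate(3 * sin(x), x))  # -3*cos(x)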
python
ray-project__ray
python/ray/data/tests/conftest.py
{ "start": 14239, "end": 17511 }
class ____: def __init__(self, task_count=None, object_store_stats=None, actor_count=None): self.task_count = task_count self.object_store_stats = object_store_stats self.actor_count = actor_count def get_task_count(self): return self.task_count def get_object_store_stats(self): return self.object_store_stats def get_actor_count(self): return self.actor_count def _assert_count_equals(self, actual_count, expected_count): diff = {} # Check that all tasks in expected tasks match those in actual task # count. for name, count in expected_count.items(): if not equals_or_true(actual_count[name], count): diff[name] = (actual_count[name], count) assert len(diff) == 0, "\nTask diff:\n" + "\n".join( f" - {key}: expected {val[1]}, got {val[0]}" for key, val in diff.items() ) def assert_task_metrics(self, expected_metrics): """ Assert equality to the given { <task name>: <task count> }. A lambda that takes in the count and returns a bool to assert can also be given instead of an integer task count. An empty dict means that we expected no tasks to run. Pass None to skip the check. """ if expected_metrics.get_task_count() is None: return expected_task_count = expected_metrics.get_task_count() actual_task_count = self.get_task_count() self._assert_count_equals(actual_task_count, expected_task_count) def assert_object_store_metrics(self, expected_metrics): """ By default this checks that no objects were spilled or restored. Collected stats only apply to plasma store objects and exclude inlined or in-memory objects. Caller can also override the following fields with a value or lambda to assert. - spilled_bytes_total - restored_bytes_total - cumulative_created_plasma_bytes - cumulative_created_plasma_objects """ expected_object_store_stats = ( CoreExecutionMetrics.get_default_object_store_stats() ) if expected_metrics.get_object_store_stats() is not None: for key, val in expected_metrics.get_object_store_stats().items(): expected_object_store_stats[key] = val actual_object_store_stats = self.get_object_store_stats() for key, val in expected_object_store_stats.items(): print(f"{key}: Expect {val}, got {actual_object_store_stats[key]}") assert equals_or_true( actual_object_store_stats[key], val ), f"{key}: expected {val} got {actual_object_store_stats[key]}" def assert_actor_metrics(self, expected_metrics): if expected_metrics.get_actor_count() is None: return expected_actor_count = expected_metrics.get_actor_count() actual_actor_count = self.get_actor_count() self._assert_count_equals(actual_actor_count, expected_actor_count) @staticmethod def get_default_object_store_stats(): return { "spilled_bytes_total": 0, "restored_bytes_total": 0, }
CoreExecutionMetrics
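# Illustrative sketch (not part of the record above): the "value or predicate"
# comparison that the metrics assertions above rely on. equals_or_true itself
# is defined elsewhere in the test suite; this is an assumed equivalent.
def equals_or_true(actual, expected):
    return expected(actual) if callable(expected) else actual == expected

print(equals_or_true(5, 5))                  # True
print(equals_or_true(5, lambda n: n <= 10))  # True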
python
run-llama__llama_index
llama-index-core/tests/memory/test_memory_blocks_base.py
{ "start": 226, "end": 573 }
class ____(BaseMemoryBlock[str]):
    """Memory block that returns text content."""

    async def _aget(self, messages: List[ChatMessage], **kwargs: Any) -> str:
        return "Simple text content from TextMemoryBlock"

    async def _aput(self, messages: List[ChatMessage]) -> None:
        # Just a no-op for testing
        pass
TextMemoryBlock
python
crytic__slither
slither/detectors/operations/unchecked_send_return_value.py
{ "start": 314, "end": 1411 }
class ____(UnusedReturnValues):
    """
    If the return value of a send is not checked, it might lead to losing ether
    """

    ARGUMENT = "unchecked-send"
    HELP = "Unchecked send"
    IMPACT = DetectorClassification.MEDIUM
    CONFIDENCE = DetectorClassification.MEDIUM

    WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#unchecked-send"

    WIKI_TITLE = "Unchecked Send"
    WIKI_DESCRIPTION = "The return value of a `send` is not checked."

    # region wiki_exploit_scenario
    WIKI_EXPLOIT_SCENARIO = """
```solidity
contract MyConc{
    function my_func(address payable dst) public payable{
        dst.send(msg.value);
    }
}
```
The return value of `send` is not checked, so if the send fails, the Ether will be locked in the contract.
If `send` is used to prevent blocking operations, consider logging the failed `send`.
"""
    # endregion wiki_exploit_scenario

    WIKI_RECOMMENDATION = "Ensure that the return value of `send` is checked or logged."

    def _is_instance(self, ir: Operation) -> bool:
        return isinstance(ir, Send)
UncheckedSend
python
pennersr__django-allauth
allauth/account/models.py
{ "start": 6922, "end": 10994 }
class ____: """ Represents a user that is in the process of logging in. Keyword arguments: signup -- Indicates whether or not sending the email is essential (during signup), or if it can be skipped (e.g. in case email verification is optional and we are only logging in). """ # Optional, because we might be prentending logins to prevent user # enumeration. user: Optional[AbstractBaseUser] email_verification: app_settings.EmailVerificationMethod signal_kwargs: Optional[Dict] signup: bool email: Optional[str] phone: Optional[str] state: Dict initiated_at: float redirect_url: Optional[str] def __init__( self, user, email_verification: Optional[app_settings.EmailVerificationMethod] = None, redirect_url: Optional[str] = None, signal_kwargs: Optional[Dict] = None, signup: bool = False, email: Optional[str] = None, state: Optional[Dict] = None, initiated_at: Optional[float] = None, phone: Optional[str] = None, ): self.user = user if not email_verification: email_verification = app_settings.EMAIL_VERIFICATION self.email_verification = email_verification self.redirect_url = redirect_url self.signal_kwargs = signal_kwargs self.signup = signup self.email = email self.phone = phone self.state = {} if state is None else state self.initiated_at = initiated_at if initiated_at else time.time() def serialize(self): from allauth.account.utils import user_pk_to_url_str # :-( Knowledge of the `socialaccount` is entering the `account` app. signal_kwargs = self.signal_kwargs if signal_kwargs is not None: sociallogin = signal_kwargs.get("sociallogin") if sociallogin is not None: signal_kwargs = signal_kwargs.copy() signal_kwargs["sociallogin"] = sociallogin.serialize() data = { "user_pk": user_pk_to_url_str(self.user) if self.user else None, "email_verification": self.email_verification, "signup": self.signup, "redirect_url": self.redirect_url, "email": self.email, "phone": self.phone, "signal_kwargs": signal_kwargs, "state": self.state, "initiated_at": self.initiated_at, } return data @classmethod def deserialize(cls, data): from allauth.account.utils import url_str_to_user_pk user = None user_pk = data["user_pk"] if user_pk is not None: user = ( get_user_model().objects.filter(pk=url_str_to_user_pk(user_pk)).first() ) try: # :-( Knowledge of the `socialaccount` is entering the `account` app. signal_kwargs = data["signal_kwargs"] if signal_kwargs is not None: sociallogin = signal_kwargs.get("sociallogin") if sociallogin is not None: from allauth.socialaccount.models import SocialLogin signal_kwargs = signal_kwargs.copy() signal_kwargs["sociallogin"] = SocialLogin.deserialize(sociallogin) return Login( user=user, email_verification=data["email_verification"], redirect_url=data["redirect_url"], email=data["email"], phone=data["phone"], signup=data["signup"], signal_kwargs=signal_kwargs, state=data["state"], initiated_at=data["initiated_at"], ) except KeyError: raise ValueError() def get_emailconfirmation_model(): if app_settings.EMAIL_VERIFICATION_BY_CODE_ENABLED: raise NotImplementedError elif app_settings.EMAIL_CONFIRMATION_HMAC: model = EmailConfirmationHMAC else: model = EmailConfirmation return model
Login
python
getsentry__sentry
src/sentry/web/frontend/debug/debug_new_user_feedback_email.py
{ "start": 402, "end": 1635 }
class ____(View): def get(self, request: HttpRequest) -> HttpResponse: org = Organization(id=1, slug="organization", name="My Company") project = Project(id=1, organization=org, slug="project", name="My Project") event = create_sample_event( project=project, platform="python", event_id="595", timestamp=1452683305 ) group = event.group link = absolute_uri( f"/{project.organization.slug}/{project.slug}/issues/{group.id}/feedback/" ) return MailPreview( html_template="sentry/emails/activity/new-user-feedback.html", text_template="sentry/emails/activity/new-user-feedback.txt", context={ "group": group, "report": { "name": "Homer Simpson", "email": "homer.simpson@example.com", "comments": "I hit a bug.\n\nI went to https://example.com, hit the any key, and then it stopped working. DOH!", }, "link": link, "reason": "are subscribed to this issue", "enhanced_privacy": False, }, ).render(request)
DebugNewUserFeedbackEmailView
python
spyder-ide__spyder
spyder/plugins/debugger/plugin.py
{ "start": 1541, "end": 24083 }
class ____(SpyderDockablePlugin, ShellConnectPluginMixin, RunExecutor): """Debugger plugin.""" NAME = 'debugger' REQUIRES = [Plugins.IPythonConsole, Plugins.Preferences, Plugins.Run] OPTIONAL = [Plugins.Editor, Plugins.MainMenu, Plugins.Toolbar] TABIFY = [Plugins.VariableExplorer, Plugins.Help] WIDGET_CLASS = DebuggerWidget CONF_SECTION = NAME CONF_FILE = False CONF_WIDGET_CLASS = DebuggerConfigPage DISABLE_ACTIONS_WHEN_HIDDEN = False # ---- SpyderDockablePlugin API # ------------------------------------------------------------------------ @staticmethod def get_name(): return _('Debugger') @staticmethod def get_description(): return _('View, explore and navigate stack frames while debugging.') @classmethod def get_icon(cls): return cls.create_icon('debug') def on_initialize(self): widget = self.get_widget() widget.sig_pdb_state_changed.connect( self._update_current_codeeditor_pdb_state) widget.sig_toggle_breakpoints.connect(self._set_or_clear_breakpoint) widget.sig_toggle_conditional_breakpoints.connect( self._set_or_edit_conditional_breakpoint) widget.sig_clear_all_breakpoints.connect(self.clear_all_breakpoints) widget.sig_load_pdb_file.connect(self._load_pdb_file_in_editor) widget.sig_clear_breakpoint.connect(self.clear_breakpoint) widget.sig_switch_to_plugin_requested.connect(self.switch_to_plugin) self.python_editor_run_configuration = { 'origin': self.NAME, 'extension': 'py', 'contexts': [ {'name': 'File'}, {'name': 'Cell'}, {'name': 'Selection'}, ] } self.ipython_editor_run_configuration = { 'origin': self.NAME, 'extension': 'ipy', 'contexts': [ {'name': 'File'}, {'name': 'Cell'}, {'name': 'Selection'}, ] } self.pyw_editor_run_configuration = { 'origin': self.NAME, 'extension': 'pyw', 'contexts': [ {'name': 'File'}, {'name': 'Cell'}, {'name': 'Selection'}, ] } self.executor_configuration = [ { 'input_extension': 'py', 'context': {'name': 'File'}, 'output_formats': [], 'configuration_widget': IPythonConfigOptions, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'ipy', 'context': {'name': 'File'}, 'output_formats': [], 'configuration_widget': IPythonConfigOptions, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'pyw', 'context': {'name': 'File'}, 'output_formats': [], 'configuration_widget': IPythonConfigOptions, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'py', 'context': {'name': 'Cell'}, 'output_formats': [], 'configuration_widget': None, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'ipy', 'context': {'name': 'Cell'}, 'output_formats': [], 'configuration_widget': None, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'pyw', 'context': {'name': 'Cell'}, 'output_formats': [], 'configuration_widget': None, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'py', 'context': {'name': 'Selection'}, 'output_formats': [], 'configuration_widget': None, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'ipy', 'context': {'name': 'Selection'}, 'output_formats': [], 'configuration_widget': None, 'requires_cwd': True, 'priority': 10 }, { 'input_extension': 'pyw', 'context': {'name': 'Selection'}, 'output_formats': [], 'configuration_widget': None, 'requires_cwd': True, 'priority': 10 }, ] def on_mainwindow_visible(self): self.get_widget().update_splitter_widths(self.get_widget().width()) @on_plugin_available(plugin=Plugins.Run) def on_run_available(self): run = self.get_plugin(Plugins.Run) run.register_executor_configuration(self, self.executor_configuration) run.create_run_in_executor_button( 
RunContext.File, self.NAME, text=_("&Debug file"), tip=_("Debug file"), icon=self.create_icon('debug'), shortcut_context="_", register_shortcut=True, add_to_menu={ "menu": ApplicationMenus.Debug, "section": DebugMenuSections.StartDebug, "before_section": DebugMenuSections.ControlDebug }, add_to_toolbar={ "toolbar": ApplicationToolbars.Debug, "before": DebuggerWidgetActions.Next, }, shortcut_widget_context=Qt.ApplicationShortcut, ) run.create_run_in_executor_button( RunContext.Cell, self.NAME, text=_("Debug cell"), tip=_("Debug cell"), icon=self.create_icon('debug_cell'), shortcut_context=self.NAME, register_shortcut=True, add_to_menu={ "menu": ApplicationMenus.Debug, "section": DebugMenuSections.StartDebug, "before_section": DebugMenuSections.ControlDebug }, add_to_toolbar={ "toolbar": ApplicationToolbars.Debug, "before": DebuggerWidgetActions.Next, }, ) run.create_run_in_executor_button( RunContext.Selection, self.NAME, text=_("Debug the current line or selection"), tip=_("Debug the current line or selection"), icon=self.create_icon('debug_selection'), shortcut_context=self.NAME, register_shortcut=True, add_to_menu={ "menu": ApplicationMenus.Debug, "section": DebugMenuSections.StartDebug, "before_section": DebugMenuSections.ControlDebug }, add_to_toolbar={ "toolbar": ApplicationToolbars.Debug, "before": DebuggerWidgetActions.Next, }, ) @on_plugin_teardown(plugin=Plugins.Run) def on_run_teardown(self): run = self.get_plugin(Plugins.Run) run.deregister_executor_configuration( self, self.executor_configuration ) run.destroy_run_in_executor_button(RunContext.File, self.NAME) run.destroy_run_in_executor_button(RunContext.Cell, self.NAME) run.destroy_run_in_executor_button(RunContext.Selection, self.NAME) @on_plugin_available(plugin=Plugins.Preferences) def on_preferences_available(self): preferences = self.get_plugin(Plugins.Preferences) preferences.register_plugin_preferences(self) @on_plugin_teardown(plugin=Plugins.Preferences) def on_preferences_teardown(self): preferences = self.get_plugin(Plugins.Preferences) preferences.deregister_plugin_preferences(self) @on_plugin_available(plugin=Plugins.Editor) def on_editor_available(self): editor = self.get_plugin(Plugins.Editor) widget = self.get_widget() for run_config in [ self.python_editor_run_configuration, self.ipython_editor_run_configuration, self.pyw_editor_run_configuration ]: editor.add_supported_run_configuration(run_config) # The editor is available, connect signals. widget.sig_edit_goto.connect(editor.load) editor.sig_codeeditor_created.connect(self._add_codeeditor) editor.sig_codeeditor_changed.connect(self._update_codeeditor) editor.sig_codeeditor_deleted.connect(self._remove_codeeditor) # Apply shortcuts to editor and add actions to pythonfile list editor_shortcuts = [ DebuggerBreakpointActions.ToggleBreakpoint, DebuggerBreakpointActions.ToggleConditionalBreakpoint, DebuggerBreakpointActions.ShowBreakpointsTable, ] for name in editor_shortcuts: action = self.get_action(name) # TODO: This should be handled differently? 
editor.get_widget().pythonfile_dependent_actions += [action] @on_plugin_teardown(plugin=Plugins.Editor) def on_editor_teardown(self): editor = self.get_plugin(Plugins.Editor) widget = self.get_widget() for run_config in [ self.python_editor_run_configuration, self.ipython_editor_run_configuration, self.pyw_editor_run_configuration ]: editor.remove_supported_run_configuration(run_config) widget.sig_edit_goto.disconnect(editor.load) editor.sig_codeeditor_created.disconnect(self._add_codeeditor) editor.sig_codeeditor_changed.disconnect(self._update_codeeditor) editor.sig_codeeditor_deleted.disconnect(self._remove_codeeditor) # Remove editor actions editor_shortcuts = [ DebuggerBreakpointActions.ToggleBreakpoint, DebuggerBreakpointActions.ToggleConditionalBreakpoint, DebuggerBreakpointActions.ShowBreakpointsTable, ] for name in editor_shortcuts: action = self.get_action(name) if action in editor.get_widget().pythonfile_dependent_actions: editor.get_widget().pythonfile_dependent_actions.remove(action) @on_plugin_available(plugin=Plugins.MainMenu) def on_main_menu_available(self): mainmenu = self.get_plugin(Plugins.MainMenu) # ControlDebug section for action in [DebuggerWidgetActions.Next, DebuggerWidgetActions.Step, DebuggerWidgetActions.Return, DebuggerWidgetActions.Continue, DebuggerWidgetActions.Stop]: mainmenu.add_item_to_application_menu( self.get_action(action), menu_id=ApplicationMenus.Debug, section=DebugMenuSections.ControlDebug, before_section=DebugMenuSections.EditBreakpoints) # Breakpoints section for action in [DebuggerBreakpointActions.ToggleBreakpoint, DebuggerBreakpointActions.ToggleConditionalBreakpoint, DebuggerBreakpointActions.ClearAllBreakpoints, DebuggerBreakpointActions.ShowBreakpointsTable]: mainmenu.add_item_to_application_menu( self.get_action(action), menu_id=ApplicationMenus.Debug, section=DebugMenuSections.EditBreakpoints) @on_plugin_teardown(plugin=Plugins.MainMenu) def on_main_menu_teardown(self): mainmenu = self.get_plugin(Plugins.MainMenu) names = [ DebuggerWidgetActions.Next, DebuggerWidgetActions.Step, DebuggerWidgetActions.Return, DebuggerWidgetActions.Continue, DebuggerWidgetActions.Stop, DebuggerBreakpointActions.ToggleBreakpoint, DebuggerBreakpointActions.ToggleConditionalBreakpoint, DebuggerBreakpointActions.ClearAllBreakpoints, DebuggerBreakpointActions.ShowBreakpointsTable, ] for name in names: mainmenu.remove_item_from_application_menu( name, menu_id=ApplicationMenus.Debug ) @on_plugin_available(plugin=Plugins.Toolbar) def on_toolbar_available(self): toolbar = self.get_plugin(Plugins.Toolbar) for action_id in [ DebuggerWidgetActions.Next, DebuggerWidgetActions.Step, DebuggerWidgetActions.Return, DebuggerWidgetActions.Continue, DebuggerWidgetActions.Stop, ]: toolbar.add_item_to_application_toolbar( self.get_action(action_id), toolbar_id=ApplicationToolbars.Debug, ) debug_toolbar = toolbar.get_application_toolbar( ApplicationToolbars.Debug ) debug_toolbar.sig_is_rendered.connect( self.get_widget().on_debug_toolbar_rendered ) @on_plugin_teardown(plugin=Plugins.Toolbar) def on_toolbar_teardown(self): toolbar = self.get_plugin(Plugins.Toolbar) for action_id in [ DebuggerWidgetActions.Next, DebuggerWidgetActions.Step, DebuggerWidgetActions.Return, DebuggerWidgetActions.Continue, DebuggerWidgetActions.Stop, ]: toolbar.remove_item_from_application_toolbar( action_id, toolbar_id=ApplicationToolbars.Debug, ) # ---- Private API # ------------------------------------------------------------------------ def _load_pdb_file_in_editor(self, fname, lineno): """Load 
file using processevents.""" editor = self.get_plugin(Plugins.Editor) if editor is None: return # Prevent keyboard input from accidentally entering the # editor during repeated, rapid entry of debugging commands. editor.load(fname, lineno, processevents=False) def _is_python_editor(self, codeeditor): """Check if the editor is a python editor.""" if codeeditor.filename is None: return False txt = codeeditor.get_text_with_eol() language = get_file_language(codeeditor.filename, txt) return language.lower() in ALL_LANGUAGES["Python"] def _connect_codeeditor(self, codeeditor): """Connect a code editor.""" codeeditor.breakpoints_manager = BreakpointsManager(codeeditor) codeeditor.breakpoints_manager.sig_breakpoints_saved.connect( self.get_widget().sig_breakpoints_saved) def _disconnect_codeeditor(self, codeeditor): """Connect a code editor.""" codeeditor.breakpoints_manager.sig_breakpoints_saved.disconnect( self.get_widget().sig_breakpoints_saved) codeeditor.breakpoints_manager = None @Slot(str) def _filename_changed(self, filename): """Change filename.""" codeeditor = self._get_editor_for_filename(filename) if codeeditor is None: return if codeeditor.breakpoints_manager is None: # Was not a python editor if self._is_python_editor(codeeditor): self._connect_codeeditor(codeeditor) else: # Was a python editor if self._is_python_editor(codeeditor): codeeditor.breakpoints_manager.set_filename(filename) else: self._disconnect_codeeditor(codeeditor) @Slot(object) def _add_codeeditor(self, codeeditor): """ Add a new codeeditor. """ codeeditor.sig_filename_changed.connect(self._filename_changed) codeeditor.breakpoints_manager = None if self._is_python_editor(codeeditor): self._connect_codeeditor(codeeditor) @Slot(object) def _remove_codeeditor(self, codeeditor): """ Remove a codeeditor. """ codeeditor.sig_filename_changed.disconnect(self._filename_changed) if codeeditor.breakpoints_manager is not None: self._disconnect_codeeditor(codeeditor) @Slot(object) def _update_codeeditor(self, codeeditor): """ Focus codeeditor has changed. """ if ( codeeditor.filename is None or codeeditor.breakpoints_manager is None ): return # Update debugging state widget = self.get_widget() pdb_state = widget.get_pdb_state() filename, lineno = widget.get_pdb_last_step() codeeditor.breakpoints_manager.update_pdb_state( pdb_state, filename, lineno) @Slot(bool) def _update_current_codeeditor_pdb_state(self, pdb_state): """ The pdb state has changed. """ try: codeeditor = self._get_current_editor() if codeeditor is None or codeeditor.breakpoints_manager is None: return filename, line_number = self.get_widget().get_pdb_last_step() codeeditor.breakpoints_manager.update_pdb_state( pdb_state, filename, line_number) except RuntimeError: pass def _get_current_editor(self): """ Get current codeeditor. """ editor = self.get_plugin(Plugins.Editor) if editor is None: return None return editor.get_current_editor() def _get_editor_for_filename(self, filename): """Get editor for filename.""" editor = self.get_plugin(Plugins.Editor) if editor is None: return None return editor.get_codeeditor_for_filename(filename) def _get_current_editorstack(self): """ Get current editorstack. 
""" editor = self.get_plugin(Plugins.Editor) if editor is None: return None return editor.get_current_editorstack() @Slot() def _set_or_clear_breakpoint(self): """Toggle breakpoint.""" codeeditor = self._get_current_editor() if codeeditor is None or codeeditor.breakpoints_manager is None: return codeeditor.breakpoints_manager.toogle_breakpoint() @Slot() def _set_or_edit_conditional_breakpoint(self): """Set/edit conditional breakpoint.""" codeeditor = self._get_current_editor() if codeeditor is None or codeeditor.breakpoints_manager is None: return codeeditor.breakpoints_manager.toogle_breakpoint( edit_condition=True) # ---- Public API # ------------------------------------------------------------------------ @Slot() def clear_all_breakpoints(self): """Clear breakpoints in all files""" clear_all_breakpoints() self.get_widget().sig_breakpoints_saved.emit() editorstack = self._get_current_editorstack() if editorstack is not None: for data in editorstack.data: if data.editor.breakpoints_manager is not None: data.editor.breakpoints_manager.clear_breakpoints() @Slot(str, int) def clear_breakpoint(self, filename, lineno): """Remove a single breakpoint""" clear_breakpoint(filename, lineno) self.get_widget().sig_breakpoints_saved.emit() codeeditor = self._get_editor_for_filename(filename) if codeeditor is None or codeeditor.breakpoints_manager is None: return None codeeditor.breakpoints_manager.toogle_breakpoint(lineno) def can_close_file(self, filename=None): """ Check if a file can be closed taking into account debugging state. """ if not self.get_conf('pdb_prevent_closing'): return True widget = self.get_widget() debugging = widget.get_pdb_state() if not debugging: return True pdb_fname, __ = widget.get_pdb_last_step() if pdb_fname and filename: if osp.normcase(pdb_fname) == osp.normcase(filename): widget.print_debug_file_msg() return False return True widget.print_debug_file_msg() return False # ---- For execution @run_execute(context=RunContext.File) def debug_file( self, input: RunConfiguration, conf: ExtendedRunExecutionParameters ) -> List[RunResult]: console = self.get_plugin(Plugins.IPythonConsole) if console is None: return exec_params = conf['params'] params: IPythonConsolePyConfiguration = exec_params['executor_params'] params["run_method"] = "debugfile" console.exec_files(input, conf) self.get_widget().set_pdb_take_focus(False) @run_execute(context=RunContext.Cell) def debug_cell( self, input: RunConfiguration, conf: ExtendedRunExecutionParameters ) -> List[RunResult]: console = self.get_plugin(Plugins.IPythonConsole) if console is None: return run_input: CellRun = input['run_input'] if run_input['copy']: code = run_input['cell'] if not code.strip(): # Empty cell return console.run_selection("%%debug\n" + code) return exec_params = conf['params'] params: IPythonConsolePyConfiguration = exec_params['executor_params'] params["run_method"] = "debugcell" console.exec_cell(input, conf) self.get_widget().set_pdb_take_focus(False) @run_execute(context=RunContext.Selection) def debug_selection( self, input: RunConfiguration, conf: ExtendedRunExecutionParameters ) -> List[RunResult]: console = self.get_plugin(Plugins.IPythonConsole) if console is None: return run_input: SelectionRun = input['run_input'] code = run_input['selection'] if not code.strip(): # No selection return run_input['selection'] = "%%debug\n" + code console.exec_selection(input, conf) self.get_widget().set_pdb_take_focus(False)
Debugger
python
pytorch__pytorch
torch/distributions/mixture_same_family.py
{ "start": 317, "end": 8689 }
class ____(Distribution):
    r"""
    The `MixtureSameFamily` distribution implements a (batch of) mixture
    distribution where all component are from different parameterizations of
    the same distribution type. It is parameterized by a `Categorical`
    "selecting distribution" (over `k` component) and a component
    distribution, i.e., a `Distribution` with a rightmost batch shape
    (equal to `[k]`) which indexes each (batch of) component.

    Examples::

        >>> # xdoctest: +SKIP("undefined vars")
        >>> # Construct Gaussian Mixture Model in 1D consisting of 5 equally
        >>> # weighted normal distributions
        >>> mix = D.Categorical(torch.ones(5,))
        >>> comp = D.Normal(torch.randn(5,), torch.rand(5,))
        >>> gmm = MixtureSameFamily(mix, comp)

        >>> # Construct Gaussian Mixture Model in 2D consisting of 5 equally
        >>> # weighted bivariate normal distributions
        >>> mix = D.Categorical(torch.ones(5,))
        >>> comp = D.Independent(D.Normal(
        ...          torch.randn(5,2), torch.rand(5,2)), 1)
        >>> gmm = MixtureSameFamily(mix, comp)

        >>> # Construct a batch of 3 Gaussian Mixture Models in 2D each
        >>> # consisting of 5 random weighted bivariate normal distributions
        >>> mix = D.Categorical(torch.rand(3,5))
        >>> comp = D.Independent(D.Normal(
        ...         torch.randn(3,5,2), torch.rand(3,5,2)), 1)
        >>> gmm = MixtureSameFamily(mix, comp)

    Args:
        mixture_distribution: `torch.distributions.Categorical`-like
            instance. Manages the probability of selecting component.
            The number of categories must match the rightmost batch
            dimension of the `component_distribution`. Must have either
            scalar `batch_shape` or `batch_shape` matching
            `component_distribution.batch_shape[:-1]`
        component_distribution: `torch.distributions.Distribution`-like
            instance. Right-most batch dimension indexes component.
    """

    arg_constraints: dict[str, constraints.Constraint] = {}
    has_rsample = False

    def __init__(
        self,
        mixture_distribution: Categorical,
        component_distribution: Distribution,
        validate_args: Optional[bool] = None,
    ) -> None:
        self._mixture_distribution = mixture_distribution
        self._component_distribution = component_distribution

        if not isinstance(self._mixture_distribution, Categorical):
            raise ValueError(
                " The Mixture distribution needs to be an "
                " instance of torch.distributions.Categorical"
            )

        if not isinstance(self._component_distribution, Distribution):
            raise ValueError(
                "The Component distribution need to be an "
                "instance of torch.distributions.Distribution"
            )

        # Check that batch size matches
        mdbs = self._mixture_distribution.batch_shape
        cdbs = self._component_distribution.batch_shape[:-1]
        for size1, size2 in zip(reversed(mdbs), reversed(cdbs)):
            if size1 != 1 and size2 != 1 and size1 != size2:
                raise ValueError(
                    f"`mixture_distribution.batch_shape` ({mdbs}) is not "
                    "compatible with `component_distribution."
                    f"batch_shape`({cdbs})"
                )

        # Check that the number of mixture component matches
        km = self._mixture_distribution.logits.shape[-1]
        kc = self._component_distribution.batch_shape[-1]
        if km is not None and kc is not None and km != kc:
            raise ValueError(
                f"`mixture_distribution component` ({km}) does not"
                " equal `component_distribution.batch_shape[-1]`"
                f" ({kc})"
            )
        self._num_component = km

        event_shape = self._component_distribution.event_shape
        self._event_ndims = len(event_shape)
        super().__init__(
            batch_shape=cdbs, event_shape=event_shape, validate_args=validate_args
        )

    def expand(self, batch_shape, _instance=None):
        batch_shape = torch.Size(batch_shape)
        batch_shape_comp = batch_shape + (self._num_component,)
        new = self._get_checked_instance(MixtureSameFamily, _instance)
        new._component_distribution = self._component_distribution.expand(
            batch_shape_comp
        )
        new._mixture_distribution = self._mixture_distribution.expand(batch_shape)
        new._num_component = self._num_component
        new._event_ndims = self._event_ndims
        event_shape = new._component_distribution.event_shape
        super(MixtureSameFamily, new).__init__(
            batch_shape=batch_shape, event_shape=event_shape, validate_args=False
        )
        new._validate_args = self._validate_args
        return new

    @constraints.dependent_property  # pyrefly: ignore [bad-override]
    def support(self):
        return MixtureSameFamilyConstraint(self._component_distribution.support)

    @property
    def mixture_distribution(self) -> Categorical:
        return self._mixture_distribution

    @property
    def component_distribution(self) -> Distribution:
        return self._component_distribution

    @property
    def mean(self) -> Tensor:
        probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
        return torch.sum(
            probs * self.component_distribution.mean, dim=-1 - self._event_ndims
        )  # [B, E]

    @property
    def variance(self) -> Tensor:
        # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
        probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
        mean_cond_var = torch.sum(
            probs * self.component_distribution.variance, dim=-1 - self._event_ndims
        )
        var_cond_mean = torch.sum(
            probs * (self.component_distribution.mean - self._pad(self.mean)).pow(2.0),
            dim=-1 - self._event_ndims,
        )
        return mean_cond_var + var_cond_mean

    def cdf(self, x):
        x = self._pad(x)
        cdf_x = self.component_distribution.cdf(x)
        mix_prob = self.mixture_distribution.probs
        return torch.sum(cdf_x * mix_prob, dim=-1)

    def log_prob(self, x):
        if self._validate_args:
            self._validate_sample(x)
        x = self._pad(x)
        log_prob_x = self.component_distribution.log_prob(x)  # [S, B, k]
        log_mix_prob = torch.log_softmax(
            self.mixture_distribution.logits, dim=-1
        )  # [B, k]
        return torch.logsumexp(log_prob_x + log_mix_prob, dim=-1)  # [S, B]

    def sample(self, sample_shape=torch.Size()):
        with torch.no_grad():
            sample_len = len(sample_shape)
            batch_len = len(self.batch_shape)
            gather_dim = sample_len + batch_len
            es = self.event_shape

            # mixture samples [n, B]
            mix_sample = self.mixture_distribution.sample(sample_shape)
            mix_shape = mix_sample.shape

            # component samples [n, B, k, E]
            comp_samples = self.component_distribution.sample(sample_shape)

            # Gather along the k dimension
            mix_sample_r = mix_sample.reshape(
                mix_shape + torch.Size([1] * (len(es) + 1))
            )
            mix_sample_r = mix_sample_r.repeat(
                torch.Size([1] * len(mix_shape)) + torch.Size([1]) + es
            )

            samples = torch.gather(comp_samples, gather_dim, mix_sample_r)
            return samples.squeeze(gather_dim)

    def _pad(self, x):
        return x.unsqueeze(-1 - self._event_ndims)

    def _pad_mixture_dimensions(self, x):
        dist_batch_ndims = len(self.batch_shape)
        cat_batch_ndims = len(self.mixture_distribution.batch_shape)
        pad_ndims = 0 if cat_batch_ndims == 1 else dist_batch_ndims - cat_batch_ndims
        xs = x.shape
        x = x.reshape(
            xs[:-1]
            + torch.Size(pad_ndims * [1])
            + xs[-1:]
            + torch.Size(self._event_ndims * [1])
        )
        return x

    def __repr__(self):
        args_string = (
            f"\n {self.mixture_distribution},\n {self.component_distribution}"
        )
        return "MixtureSameFamily" + "(" + args_string + ")"
MixtureSameFamily
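A minimal usage sketch of the mixture API shown in the record above, assuming the usual `torch.distributions` alias `D`; shapes follow the class docstring, and the small offset on the scale is only there to keep it strictly positive:

import torch
from torch import distributions as D

# Five equally weighted 1-D normal components, mirroring the docstring example.
mix = D.Categorical(torch.ones(5))
comp = D.Normal(torch.randn(5), torch.rand(5) + 0.1)
gmm = D.MixtureSameFamily(mix, comp)

x = gmm.sample((8,))    # eight scalar draws from the mixture
logp = gmm.log_prob(x)  # mixture log-density via logsumexp over components
print(gmm.mean, gmm.variance)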
python
qdrant__qdrant-client
qdrant_client/client_base.py
{ "start": 132, "end": 12301 }
class ____: def __init__(self, **kwargs: Any): pass def search_matrix_offsets( self, collection_name: str, query_filter: Optional[types.Filter] = None, limit: int = 3, sample: int = 10, using: Optional[str] = None, **kwargs: Any, ) -> types.SearchMatrixOffsetsResponse: raise NotImplementedError() def search_matrix_pairs( self, collection_name: str, query_filter: Optional[types.Filter] = None, limit: int = 3, sample: int = 10, using: Optional[str] = None, **kwargs: Any, ) -> types.SearchMatrixPairsResponse: raise NotImplementedError() def query_batch_points( self, collection_name: str, requests: Sequence[types.QueryRequest], **kwargs: Any, ) -> list[types.QueryResponse]: raise NotImplementedError() def query_points( self, collection_name: str, query: Union[ types.PointId, list[float], list[list[float]], types.SparseVector, types.Query, types.NumpyArray, types.Document, types.Image, types.InferenceObject, None, ] = None, using: Optional[str] = None, prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None, query_filter: Optional[types.Filter] = None, search_params: Optional[types.SearchParams] = None, limit: int = 10, offset: Optional[int] = None, with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, with_vectors: Union[bool, Sequence[str]] = False, score_threshold: Optional[float] = None, lookup_from: Optional[types.LookupLocation] = None, **kwargs: Any, ) -> types.QueryResponse: raise NotImplementedError() def query_points_groups( self, collection_name: str, group_by: str, query: Union[ types.PointId, list[float], list[list[float]], types.SparseVector, types.Query, types.NumpyArray, types.Document, types.Image, types.InferenceObject, None, ] = None, using: Optional[str] = None, prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None, query_filter: Optional[types.Filter] = None, search_params: Optional[types.SearchParams] = None, limit: int = 10, group_size: int = 3, with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, with_vectors: Union[bool, Sequence[str]] = False, score_threshold: Optional[float] = None, with_lookup: Optional[types.WithLookupInterface] = None, lookup_from: Optional[types.LookupLocation] = None, **kwargs: Any, ) -> types.GroupsResult: raise NotImplementedError() def scroll( self, collection_name: str, scroll_filter: Optional[types.Filter] = None, limit: int = 10, order_by: Optional[types.OrderBy] = None, offset: Optional[types.PointId] = None, with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, with_vectors: Union[bool, Sequence[str]] = False, **kwargs: Any, ) -> tuple[list[types.Record], Optional[types.PointId]]: raise NotImplementedError() def count( self, collection_name: str, count_filter: Optional[types.Filter] = None, exact: bool = True, **kwargs: Any, ) -> types.CountResult: raise NotImplementedError() def facet( self, collection_name: str, key: str, facet_filter: Optional[types.Filter] = None, limit: int = 10, exact: bool = False, **kwargs: Any, ) -> types.FacetResponse: raise NotImplementedError() def upsert( self, collection_name: str, points: types.Points, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def update_vectors( self, collection_name: str, points: Sequence[types.PointVectors], **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def delete_vectors( self, collection_name: str, vectors: Sequence[str], points: types.PointsSelector, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def retrieve( self, collection_name: str, 
ids: Sequence[types.PointId], with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, with_vectors: Union[bool, Sequence[str]] = False, **kwargs: Any, ) -> list[types.Record]: raise NotImplementedError() def delete( self, collection_name: str, points_selector: types.PointsSelector, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def set_payload( self, collection_name: str, payload: types.Payload, points: types.PointsSelector, key: Optional[str] = None, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def overwrite_payload( self, collection_name: str, payload: types.Payload, points: types.PointsSelector, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def delete_payload( self, collection_name: str, keys: Sequence[str], points: types.PointsSelector, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def clear_payload( self, collection_name: str, points_selector: types.PointsSelector, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def batch_update_points( self, collection_name: str, update_operations: Sequence[types.UpdateOperation], **kwargs: Any, ) -> list[types.UpdateResult]: raise NotImplementedError() def update_collection_aliases( self, change_aliases_operations: Sequence[types.AliasOperations], **kwargs: Any, ) -> bool: raise NotImplementedError() def get_collection_aliases( self, collection_name: str, **kwargs: Any ) -> types.CollectionsAliasesResponse: raise NotImplementedError() def get_aliases(self, **kwargs: Any) -> types.CollectionsAliasesResponse: raise NotImplementedError() def get_collections(self, **kwargs: Any) -> types.CollectionsResponse: raise NotImplementedError() def get_collection(self, collection_name: str, **kwargs: Any) -> types.CollectionInfo: raise NotImplementedError() def collection_exists(self, collection_name: str, **kwargs: Any) -> bool: raise NotImplementedError() def update_collection( self, collection_name: str, **kwargs: Any, ) -> bool: raise NotImplementedError() def delete_collection(self, collection_name: str, **kwargs: Any) -> bool: raise NotImplementedError() def create_collection( self, collection_name: str, vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]], **kwargs: Any, ) -> bool: raise NotImplementedError() def recreate_collection( self, collection_name: str, vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]], **kwargs: Any, ) -> bool: raise NotImplementedError() def upload_points( self, collection_name: str, points: Iterable[types.PointStruct], **kwargs: Any, ) -> None: raise NotImplementedError() def upload_collection( self, collection_name: str, vectors: Union[ dict[str, types.NumpyArray], types.NumpyArray, Iterable[types.VectorStruct] ], payload: Optional[Iterable[dict[Any, Any]]] = None, ids: Optional[Iterable[types.PointId]] = None, **kwargs: Any, ) -> None: raise NotImplementedError() def create_payload_index( self, collection_name: str, field_name: str, field_schema: Optional[types.PayloadSchemaType] = None, field_type: Optional[types.PayloadSchemaType] = None, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def delete_payload_index( self, collection_name: str, field_name: str, **kwargs: Any, ) -> types.UpdateResult: raise NotImplementedError() def list_snapshots( self, collection_name: str, **kwargs: Any ) -> list[types.SnapshotDescription]: raise NotImplementedError() def create_snapshot( self, collection_name: str, **kwargs: Any ) -> 
Optional[types.SnapshotDescription]: raise NotImplementedError() def delete_snapshot( self, collection_name: str, snapshot_name: str, **kwargs: Any ) -> Optional[bool]: raise NotImplementedError() def list_full_snapshots(self, **kwargs: Any) -> list[types.SnapshotDescription]: raise NotImplementedError() def create_full_snapshot(self, **kwargs: Any) -> Optional[types.SnapshotDescription]: raise NotImplementedError() def delete_full_snapshot(self, snapshot_name: str, **kwargs: Any) -> Optional[bool]: raise NotImplementedError() def recover_snapshot( self, collection_name: str, location: str, **kwargs: Any, ) -> Optional[bool]: raise NotImplementedError() def list_shard_snapshots( self, collection_name: str, shard_id: int, **kwargs: Any ) -> list[types.SnapshotDescription]: raise NotImplementedError() def create_shard_snapshot( self, collection_name: str, shard_id: int, **kwargs: Any ) -> Optional[types.SnapshotDescription]: raise NotImplementedError() def delete_shard_snapshot( self, collection_name: str, shard_id: int, snapshot_name: str, **kwargs: Any ) -> Optional[bool]: raise NotImplementedError() def recover_shard_snapshot( self, collection_name: str, shard_id: int, location: str, **kwargs: Any, ) -> Optional[bool]: raise NotImplementedError() def close(self, **kwargs: Any) -> None: pass def migrate( self, dest_client: "QdrantBase", collection_names: Optional[list[str]] = None, batch_size: int = 100, recreate_on_collision: bool = False, ) -> None: raise NotImplementedError() def create_shard_key( self, collection_name: str, shard_key: types.ShardKey, shards_number: Optional[int] = None, replication_factor: Optional[int] = None, placement: Optional[list[int]] = None, **kwargs: Any, ) -> bool: raise NotImplementedError() def delete_shard_key( self, collection_name: str, shard_key: types.ShardKey, **kwargs: Any, ) -> bool: raise NotImplementedError() def info(self) -> types.VersionInfo: raise NotImplementedError() def cluster_collection_update( self, collection_name: str, cluster_operation: types.ClusterOperations, **kwargs: Any, ) -> bool: raise NotImplementedError() def collection_cluster_info(self, collection_name: str) -> types.CollectionClusterInfo: raise NotImplementedError() def cluster_status(self) -> types.ClusterStatus: raise NotImplementedError() def recover_current_peer(self) -> bool: raise NotImplementedError() def remove_peer(self, peer_id: int, **kwargs: Any) -> bool: raise NotImplementedError()
QdrantBase
python
numba__numba
numba/tests/pdlike_usecase.py
{ "start": 1959, "end": 3851 }
class ____(types.ArrayCompatible):
    """
    The type class for Series objects.
    """
    array_priority = 1000

    def __init__(self, dtype, index):
        assert isinstance(index, IndexType)
        self.dtype = dtype
        self.index = index
        self.values = types.Array(self.dtype, 1, 'C')
        name = "series(%s, %s)" % (dtype, index)
        super(SeriesType, self).__init__(name)

    @property
    def key(self):
        return self.dtype, self.index

    @property
    def as_array(self):
        return self.values

    def copy(self, dtype=None, ndim=1, layout='C'):
        assert ndim == 1
        assert layout == 'C'
        if dtype is None:
            dtype = self.dtype
        return type(self)(dtype, self.index)


@typeof_impl.register(Index)
def typeof_index(val, c):
    arrty = typeof_impl(val._data, c)
    assert arrty.ndim == 1
    return IndexType(arrty.dtype, arrty.layout, type(val))


@typeof_impl.register(Series)
def typeof_series(val, c):
    index = typeof_impl(val._index, c)
    arrty = typeof_impl(val._values, c)
    assert arrty.ndim == 1
    assert arrty.layout == 'C'
    return SeriesType(arrty.dtype, index)


@type_callable('__array_wrap__')
def type_array_wrap(context):
    def typer(input_type, result):
        if isinstance(input_type, (IndexType, SeriesType)):
            return input_type.copy(dtype=result.dtype, ndim=result.ndim,
                                   layout=result.layout)

    return typer


@type_callable(Series)
def type_series_constructor(context):
    def typer(data, index):
        if isinstance(index, IndexType) and isinstance(data, types.Array):
            assert data.layout == 'C'
            assert data.ndim == 1
            return SeriesType(data.dtype, index)

    return typer


# Backend extensions for Index and Series

@register_model(IndexType)
SeriesType
python
doocs__leetcode
solution/0900-0999/0925.Long Pressed Name/Solution.py
{ "start": 0, "end": 535 }
class ____:
    def isLongPressedName(self, name: str, typed: str) -> bool:
        m, n = len(name), len(typed)
        i = j = 0
        while i < m and j < n:
            if name[i] != typed[j]:
                return False
            x = i + 1
            while x < m and name[x] == name[i]:
                x += 1
            y = j + 1
            while y < n and typed[y] == typed[j]:
                y += 1
            if x - i > y - j:
                return False
            i, j = x, y
        return i == m and j == n
Solution
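A short sketch exercising the two-pointer run-length check above, using the published LeetCode 925 examples and assuming the Solution class from this record:

sol = Solution()
print(sol.isLongPressedName("alex", "aaleex"))    # True: every run in `typed` is at least as long
print(sol.isLongPressedName("saeed", "ssaaedd"))  # False: the "ee" run in `name` is under-typed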
python
doocs__leetcode
solution/1700-1799/1737.Change Minimum Characters to Satisfy One of Three Conditions/Solution.py
{ "start": 0, "end": 610 }
class ____:
    def minCharacters(self, a: str, b: str) -> int:
        def f(cnt1, cnt2):
            for i in range(1, 26):
                t = sum(cnt1[i:]) + sum(cnt2[:i])
                nonlocal ans
                ans = min(ans, t)

        m, n = len(a), len(b)
        cnt1 = [0] * 26
        cnt2 = [0] * 26
        for c in a:
            cnt1[ord(c) - ord('a')] += 1
        for c in b:
            cnt2[ord(c) - ord('a')] += 1
        ans = m + n
        for c1, c2 in zip(cnt1, cnt2):
            ans = min(ans, m + n - c1 - c2)
        f(cnt1, cnt2)
        f(cnt2, cnt1)
        return ans
Solution
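As above, a quick sketch with the published LeetCode 1737 examples, assuming the Solution class from this record; the three conditions counted are: make every letter of a strictly smaller than every letter of b, the reverse, or make one string consist of a single letter:

sol = Solution()
print(sol.minCharacters("aba", "caa"))     # 2
print(sol.minCharacters("dabadd", "cda"))  # 3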
python
geekcomputers__Python
venv/Lib/site-packages/pip/_internal/exceptions.py
{ "start": 20923, "end": 23832 }
class ____(DiagnosticPipError):
    """The current environment is externally managed.

    This is raised when the current environment is externally managed, as
    defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked
    and displayed when the error is bubbled up to the user.

    :param error: The error message read from ``EXTERNALLY-MANAGED``.
    """

    reference = "externally-managed-environment"

    def __init__(self, error: Optional[str]) -> None:
        if error is None:
            context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR)
        else:
            context = Text(error)
        super().__init__(
            message="This environment is externally managed",
            context=context,
            note_stmt=(
                "If you believe this is a mistake, please contact your "
                "Python installation or OS distribution provider. "
                "You can override this, at the risk of breaking your Python "
                "installation or OS, by passing --break-system-packages."
            ),
            hint_stmt=Text("See PEP 668 for the detailed specification."),
        )

    @staticmethod
    def _iter_externally_managed_error_keys() -> Iterator[str]:
        # LC_MESSAGES is in POSIX, but not the C standard. The most common
        # platform that does not implement this category is Windows, where
        # using other categories for console message localization is equally
        # unreliable, so we fall back to the locale-less vendor message. This
        # can always be re-evaluated when a vendor proposes a new alternative.
        try:
            category = locale.LC_MESSAGES
        except AttributeError:
            lang: Optional[str] = None
        else:
            lang, _ = locale.getlocale(category)
        if lang is not None:
            yield f"Error-{lang}"
            for sep in ("-", "_"):
                before, found, _ = lang.partition(sep)
                if not found:
                    continue
                yield f"Error-{before}"
        yield "Error"

    @classmethod
    def from_config(
        cls,
        config: Union[pathlib.Path, str],
    ) -> "ExternallyManagedEnvironment":
        parser = configparser.ConfigParser(interpolation=None)
        try:
            parser.read(config, encoding="utf-8")
            section = parser["externally-managed"]
            for key in cls._iter_externally_managed_error_keys():
                with contextlib.suppress(KeyError):
                    return cls(section[key])
        except KeyError:
            pass
        except (OSError, UnicodeDecodeError, configparser.ParsingError):
            from pip._internal.utils._log import VERBOSE

            exc_info = logger.isEnabledFor(VERBOSE)
            logger.warning("Failed to read %s", config, exc_info=exc_info)
        return cls(None)
ExternallyManagedEnvironment
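A small, self-contained sketch of the file format that `from_config` above parses; the `[externally-managed]` section and `Error` key follow PEP 668, while the temporary path and message text are made up for illustration:

import configparser
import pathlib
import tempfile

content = (
    "[externally-managed]\n"
    "Error = This interpreter is managed by the OS; use the system package manager.\n"
)
with tempfile.TemporaryDirectory() as tmp:
    config = pathlib.Path(tmp, "EXTERNALLY-MANAGED")
    config.write_text(content, encoding="utf-8")
    # Same configparser pattern used in from_config above.
    parser = configparser.ConfigParser(interpolation=None)
    parser.read(config, encoding="utf-8")
    print(parser["externally-managed"]["Error"])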
python
huggingface__transformers
src/transformers/models/visual_bert/modeling_visual_bert.py
{ "start": 18343, "end": 18837 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = VisualBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


@auto_docstring
VisualBertPreTrainingHeads
python
coleifer__peewee
tests/fields.py
{ "start": 43427, "end": 43547 }
class ____(TestModel):
    schedule = ForeignKeyField(Schedule)
    name = TextField()
    last_run = DateTimeField()
Task
python
spack__spack
lib/spack/spack/vendor/macholib/mach_o.py
{ "start": 43697, "end": 46822 }
class ____(Structure):
    _fields_ = (
        ("cputype", cpu_type_t),
        ("cpusubtype", cpu_subtype_t),
        ("offset", p_uint64),
        ("size", p_uint64),
        ("align", p_uint32),
        ("reserved", p_uint32),
    )


REBASE_TYPE_POINTER = 1  # noqa: E221
REBASE_TYPE_TEXT_ABSOLUTE32 = 2  # noqa: E221
REBASE_TYPE_TEXT_PCREL32 = 3  # noqa: E221

REBASE_OPCODE_MASK = 0xF0  # noqa: E221
REBASE_IMMEDIATE_MASK = 0x0F  # noqa: E221

REBASE_OPCODE_DONE = 0x00  # noqa: E221
REBASE_OPCODE_SET_TYPE_IMM = 0x10  # noqa: E221
REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x20  # noqa: E221
REBASE_OPCODE_ADD_ADDR_ULEB = 0x30  # noqa: E221
REBASE_OPCODE_ADD_ADDR_IMM_SCALED = 0x40  # noqa: E221
REBASE_OPCODE_DO_REBASE_IMM_TIMES = 0x50  # noqa: E221
REBASE_OPCODE_DO_REBASE_ULEB_TIMES = 0x60  # noqa: E221
REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB = 0x70  # noqa: E221
REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB = 0x80  # noqa: E221

BIND_TYPE_POINTER = 1  # noqa: E221
BIND_TYPE_TEXT_ABSOLUTE32 = 2  # noqa: E221
BIND_TYPE_TEXT_PCREL32 = 3  # noqa: E221

BIND_SPECIAL_DYLIB_SELF = 0  # noqa: E221
BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE = -1  # noqa: E221
BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2  # noqa: E221

BIND_SYMBOL_FLAGS_WEAK_IMPORT = 0x1  # noqa: E221
BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION = 0x8  # noqa: E221

BIND_OPCODE_MASK = 0xF0  # noqa: E221
BIND_IMMEDIATE_MASK = 0x0F  # noqa: E221
BIND_OPCODE_DONE = 0x00  # noqa: E221
BIND_OPCODE_SET_DYLIB_ORDINAL_IMM = 0x10  # noqa: E221
BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB = 0x20  # noqa: E221
BIND_OPCODE_SET_DYLIB_SPECIAL_IMM = 0x30  # noqa: E221
BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM = 0x40  # noqa: E221
BIND_OPCODE_SET_TYPE_IMM = 0x50  # noqa: E221
BIND_OPCODE_SET_ADDEND_SLEB = 0x60  # noqa: E221
BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x70  # noqa: E221
BIND_OPCODE_ADD_ADDR_ULEB = 0x80  # noqa: E221
BIND_OPCODE_DO_BIND = 0x90  # noqa: E221
BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB = 0xA0  # noqa: E221
BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED = 0xB0  # noqa: E221
BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB = 0xC0  # noqa: E221

EXPORT_SYMBOL_FLAGS_KIND_MASK = 0x03  # noqa: E221
EXPORT_SYMBOL_FLAGS_KIND_REGULAR = 0x00  # noqa: E221
EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL = 0x01  # noqa: E221
EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION = 0x04  # noqa: E221
EXPORT_SYMBOL_FLAGS_REEXPORT = 0x08  # noqa: E221
EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER = 0x10  # noqa: E221

PLATFORM_MACOS = 1
PLATFORM_IOS = 2
PLATFORM_TVOS = 3
PLATFORM_WATCHOS = 4
PLATFORM_BRIDGEOS = 5
PLATFORM_IOSMAC = 6
PLATFORM_MACCATALYST = 6
PLATFORM_IOSSIMULATOR = 7
PLATFORM_TVOSSIMULATOR = 8
PLATFORM_WATCHOSSIMULATOR = 9

PLATFORM_NAMES = {
    PLATFORM_MACOS: "macOS",
    PLATFORM_IOS: "iOS",
    PLATFORM_TVOS: "tvOS",
    PLATFORM_WATCHOS: "watchOS",
    PLATFORM_BRIDGEOS: "bridgeOS",
    PLATFORM_MACCATALYST: "catalyst",
    PLATFORM_IOSSIMULATOR: "iOS simulator",
    PLATFORM_TVOSSIMULATOR: "tvOS simulator",
    PLATFORM_WATCHOSSIMULATOR: "watchOS simulator",
}

TOOL_CLANG = 1
TOOL_SWIFT = 2
TOOL_LD = 3

TOOL_NAMES = {TOOL_CLANG: "clang", TOOL_SWIFT: "swift", TOOL_LD: "ld"}
fat_arch64
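The mask constants in the record above pack a 4-bit opcode and a 4-bit immediate into one byte. A tiny sketch of splitting a single byte with those masks; the byte value is hypothetical, and the reading of the immediate as a segment index follows dyld's bind-opcode convention rather than anything stated in this record:

byte = 0x72  # hypothetical byte from a dyld bind-opcode stream
opcode = byte & BIND_OPCODE_MASK        # 0x70 == BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
immediate = byte & BIND_IMMEDIATE_MASK  # 0x02; for this opcode, the segment index
print(hex(opcode), immediate)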