language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 5411,
"end": 12119
} | class ____(NonStrictDataModel):
"""
:param label_rules: List of FilterLabelRule ('AND' connection)
disabled - No filtering by ROIs. Select all frames, even if they don't have
ROIs (all frames)
no_rois - Select only frames without ROIs (empty frames)
label_rules - Select frames according to label rules
:type label_rules: Sequence[FilterLabelRule]
:param filter_by_roi: Type of filter. Optional, the default value is
'label_rules'
:type filter_by_roi: FilterByRoiEnum
:param frame_query: Frame filter, in Lucene query syntax
:type frame_query: str
:param sources_query: Sources filter, in Lucene query syntax. Filters sources
in each frame.
:type sources_query: str
:param dataset: Dataset ID. Must be a dataset which is in the task's view. If
set to '*' all datasets in View are used.
:type dataset: str
:param version: Dataset version to apply rule to. Must belong to the dataset
and be in the task's view. If set to '*' all version of the datasets in View
are used.
:type version: str
:param weight: Rule weight. Default is 1
:type weight: float
"""
_schema = {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View "
"are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all frames,"
" even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty"
" frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to"
" '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {"description": "Rule weight. Default is 1", "type": "number"},
},
"required": ["dataset"],
"type": "object",
}
def __init__(
self,
dataset,
label_rules=None,
filter_by_roi=None,
frame_query=None,
sources_query=None,
version=None,
weight=None,
**kwargs
):
super(FilterRule, self).__init__(**kwargs)
self.label_rules = label_rules
self.filter_by_roi = filter_by_roi
self.frame_query = frame_query
self.sources_query = sources_query
self.dataset = dataset
self.version = version
self.weight = weight
@schema_property("label_rules")
def label_rules(self):
return self._property_label_rules
@label_rules.setter
def label_rules(self, value):
if value is None:
self._property_label_rules = None
return
self.assert_isinstance(value, "label_rules", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [
FilterLabelRule.from_dict(v) if isinstance(v, dict) else v
for v in value
]
else:
self.assert_isinstance(value, "label_rules", FilterLabelRule, is_array=True)
self._property_label_rules = value
@schema_property("filter_by_roi")
def filter_by_roi(self):
return self._property_filter_by_roi
@filter_by_roi.setter
def filter_by_roi(self, value):
if value is None:
self._property_filter_by_roi = None
return
if isinstance(value, six.string_types):
try:
value = FilterByRoiEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "filter_by_roi", enum.Enum)
self._property_filter_by_roi = value
@schema_property("frame_query")
def frame_query(self):
return self._property_frame_query
@frame_query.setter
def frame_query(self, value):
if value is None:
self._property_frame_query = None
return
self.assert_isinstance(value, "frame_query", six.string_types)
self._property_frame_query = value
@schema_property("sources_query")
def sources_query(self):
return self._property_sources_query
@sources_query.setter
def sources_query(self, value):
if value is None:
self._property_sources_query = None
return
self.assert_isinstance(value, "sources_query", six.string_types)
self._property_sources_query = value
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
@schema_property("weight")
def weight(self):
return self._property_weight
@weight.setter
def weight(self, value):
if value is None:
self._property_weight = None
return
self.assert_isinstance(value, "weight", six.integer_types + (float,))
self._property_weight = value
| FilterRule |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 1579,
"end": 2705
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author", faceted=True)
pub_date = indexes.DateTimeField(model_attr="pub_date", faceted=True)
extra = indexes.CharField(indexed=False, use_template=True)
hello = indexes.CharField(model_attr="hello")
def prepare(self, obj):
super().prepare(obj)
self.prepared_data["whee"] = "Custom preparation."
return self.prepared_data
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data["author"]
def load_all_queryset(self):
return self.get_model()._default_manager.filter(id__gt=1)
def get_model(self):
return MockModel
def index_queryset(self, using=None):
return MockModel.objects.all()
def read_queryset(self, using=None):
return MockModel.objects.filter(author__in=["daniel1", "daniel3"])
def build_queryset(self, start_date=None, end_date=None):
return MockModel.objects.filter(author__in=["daniel1", "daniel3"])
| GoodCustomMockSearchIndex |
python | Netflix__metaflow | metaflow/_vendor/v3_6/typing_extensions.py | {
"start": 100188,
"end": 107795
} | class ____:
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* type such as
``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name):
self.__name__ = name
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if not PEP_560:
# Only needed in 3.6.
def _get_type_vars(self, tvars):
if self not in tvars:
tvars.append(self)
if hasattr(typing, "reveal_type"):
reveal_type = typing.reveal_type
else:
def reveal_type(__obj: T) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
return __obj
if hasattr(typing, "assert_never"):
assert_never = typing.assert_never
else:
def assert_never(__arg: Never) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
raise AssertionError("Expected code to be unreachable")
if hasattr(typing, 'dataclass_transform'):
dataclass_transform = typing.dataclass_transform
else:
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_descriptors: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from metaflow._vendor.v3_6.typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_descriptors`` specifies a static list of supported classes
or functions, that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"field_descriptors": field_descriptors,
}
return cls_or_fn
return decorator
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
typing._collect_type_vars = _collect_type_vars
typing._check_generic = _check_generic
| TypeVarTuple |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column.py | {
"start": 88680,
"end": 91402
} | class ____(_DenseColumn, _CategoricalColumn,
collections.namedtuple('_BucketizedColumn',
['source_column', 'boundaries'])
):
"""See `bucketized_column`."""
@property
def name(self):
return '{}_bucketized'.format(self.source_column.name)
@property
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
@property
def _num_buckets(self):
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = inputs.get(self)
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]), (-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor,
(-1,)) + (len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops_stack.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops_stack.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices, values=bucket_indices, dense_shape=dense_shape)
return _CategoricalColumn.IdWeightPair(sparse_tensor, None)
| _BucketizedColumn |
python | scipy__scipy | scipy/stats/_resampling.py | {
"start": 57651,
"end": 93735
} | class ____:
"""Result object returned by `scipy.stats.permutation_test`.
Attributes
----------
statistic : float or ndarray
The observed test statistic of the data.
pvalue : float or ndarray
The p-value for the given alternative.
null_distribution : ndarray
The values of the test statistic generated under the null
hypothesis.
"""
statistic: float | np.ndarray
pvalue: float | np.ndarray
null_distribution: np.ndarray
def _all_partitions_concatenated(ns):
"""
Generate all partitions of indices of groups of given sizes, concatenated
`ns` is an iterable of ints.
"""
def all_partitions(z, n):
for c in combinations(z, n):
x0 = set(c)
x1 = z - x0
yield [x0, x1]
def all_partitions_n(z, ns):
if len(ns) == 0:
yield [z]
return
for c in all_partitions(z, ns[0]):
for d in all_partitions_n(c[1], ns[1:]):
yield c[0:1] + d
z = set(range(np.sum(ns)))
for partitioning in all_partitions_n(z, ns[:]):
x = np.concatenate([list(partition)
for partition in partitioning]).astype(int)
yield x
def _batch_generator(iterable, batch):
"""A generator that yields batches of elements from an iterable"""
iterator = iter(iterable)
if batch <= 0:
raise ValueError("`batch` must be positive.")
z = [item for i, item in zip(range(batch), iterator)]
while z: # we don't want StopIteration without yielding an empty list
yield z
z = [item for i, item in zip(range(batch), iterator)]
def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch,
rng):
# Returns a generator that yields arrays of size
# `(batch, n_samples, n_obs_sample)`.
# Each row is an independent permutation of indices 0 to `n_obs_sample`.
batch = min(batch, n_permutations)
if hasattr(rng, 'permuted'):
def batched_perm_generator():
indices = np.arange(n_obs_sample)
indices = np.tile(indices, (batch, n_samples, 1))
for k in range(0, n_permutations, batch):
batch_actual = min(batch, n_permutations-k)
# Don't permute in place, otherwise results depend on `batch`
permuted_indices = rng.permuted(indices, axis=-1)
yield permuted_indices[:batch_actual]
else: # RandomState and early Generators don't have `permuted`
def batched_perm_generator():
for k in range(0, n_permutations, batch):
batch_actual = min(batch, n_permutations-k)
size = (batch_actual, n_samples, n_obs_sample)
x = rng.random(size=size)
yield np.argsort(x, axis=-1)[:batch_actual]
return batched_perm_generator()
def _calculate_null_both(data, statistic, n_permutations, batch,
rng=None, *, xp):
"""
Calculate null distribution for independent sample tests.
"""
# compute number of permutations
# (distinct partitions of data into samples of these sizes)
n_obs_i = [sample.shape[-1] for sample in data] # observations per sample
n_obs_ic = list(accumulate(n_obs_i, initial=0))
n_obs = n_obs_ic[-1] # total number of observations
n_max = math.prod([math.comb(n, k) for n, k in zip(n_obs_ic[1:], n_obs_ic[:-1])])
# perm_generator is an iterator that produces permutations of indices
# from 0 to n_obs. We'll concatenate the samples, use these indices to
# permute the data, then split the samples apart again.
if n_permutations >= n_max:
exact_test = True
n_permutations = n_max
perm_generator = _all_partitions_concatenated(n_obs_i)
else:
exact_test = False
# Neither RandomState.permutation nor Generator.permutation
# can permute axis-slices independently. If this feature is
# added in the future, batches of the desired size should be
# generated in a single call.
perm_generator = (rng.permutation(n_obs)
for i in range(n_permutations))
batch = batch or int(n_permutations)
null_distribution = []
# First, concatenate all the samples. In batches, permute samples with
# indices produced by the `perm_generator`, split them into new samples of
# the original sizes, compute the statistic for each batch, and add these
# statistic values to the null distribution.
data = xp.concat(data, axis=-1)
for indices in _batch_generator(perm_generator, batch=batch):
# Creating a tensor from a list of numpy.ndarrays is extremely slow...
indices = np.asarray(indices)
indices = xp.asarray(indices)
# `indices` is 2D: each row is a permutation of the indices.
# We use it to index `data` along its last axis, which corresponds
# with observations.
# After indexing, the second to last axis of `data_batch` corresponds
# with permutations, and the last axis corresponds with observations.
# data_batch = data[..., indices]
data_batch = _get_from_last_axis(data, indices, xp=xp)
# Move the permutation axis to the front: we'll concatenate a list
# of batched statistic values along this zeroth axis to form the
# null distribution.
data_batch = xp.moveaxis(data_batch, -2, 0)
# data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1)
data_batch = [data_batch[..., i:j] for i, j in zip(n_obs_ic[:-1], n_obs_ic[1:])]
null_distribution.append(statistic(*data_batch, axis=-1))
null_distribution = xp.concat(null_distribution, axis=0)
return null_distribution, n_permutations, exact_test
def _calculate_null_pairings(data, statistic, n_permutations, batch,
rng=None, *, xp):
"""
Calculate null distribution for association tests.
"""
n_samples = len(data)
# compute number of permutations (factorial(n) permutations of each sample)
n_obs_sample = data[0].shape[-1] # observations per sample; same for each
n_max = math.factorial(n_obs_sample)**n_samples
# `perm_generator` is an iterator that produces a list of permutations of
# indices from 0 to n_obs_sample, one for each sample.
if n_permutations >= n_max:
exact_test = True
n_permutations = n_max
batch = batch or int(n_permutations)
# Cartesian product of the sets of all permutations of indices
perm_generator = product(*(permutations(range(n_obs_sample))
for i in range(n_samples)))
batched_perm_generator = _batch_generator(perm_generator, batch=batch)
else:
exact_test = False
batch = batch or int(n_permutations)
# Separate random permutations of indices for each sample.
# Again, it would be nice if RandomState/Generator.permutation
# could permute each axis-slice separately.
args = n_permutations, n_samples, n_obs_sample, batch, rng
batched_perm_generator = _pairings_permutations_gen(*args)
null_distribution = []
for indices in batched_perm_generator:
indices = xp.asarray(indices)
# `indices` is 3D: the zeroth axis is for permutations, the next is
# for samples, and the last is for observations. Swap the first two
# to make the zeroth axis correspond with samples, as it does for
# `data`.
indices = xp_swapaxes(indices, 0, 1, xp=xp)
# When we're done, `data_batch` will be a list of length `n_samples`.
# Each element will be a batch of random permutations of one sample.
# The zeroth axis of each batch will correspond with permutations,
# and the last will correspond with observations. (This makes it
# easy to pass into `statistic`.)
data_batch = [None]*n_samples
for i in range(n_samples):
# data_batch[i] = data[i][..., indices[i]]
data_batch[i] = _get_from_last_axis(data[i], indices[i, ...], xp=xp)
data_batch[i] = xp.moveaxis(data_batch[i], -2, 0)
null_distribution.append(statistic(*data_batch, axis=-1))
null_distribution = xp.concat(null_distribution, axis=0)
return null_distribution, n_permutations, exact_test
def _calculate_null_samples(data, statistic, n_permutations, batch,
rng=None, *, xp):
"""
Calculate null distribution for paired-sample tests.
"""
n_samples = len(data)
# By convention, the meaning of the "samples" permutations type for
# data with only one sample is to flip the sign of the observations.
# Achieve this by adding a second sample - the negative of the original.
if n_samples == 1:
data = [data[0], -data[0]]
# The "samples" permutation strategy is the same as the "pairings"
# strategy except the roles of samples and observations are flipped.
# So swap these axes, then we'll use the function for the "pairings"
# strategy to do all the work!
data = xp.stack(data, axis=0)
data = xp_swapaxes(data, 0, -1, xp=xp)
# (Of course, the user's statistic doesn't know what we've done here,
# so we need to pass it what it's expecting.)
def statistic_wrapped(*data, axis):
# can we do this without converting back and forth between
# array and list?
data = xp.stack(data, axis=0)
data = xp_swapaxes(data, 0, -1, xp=xp)
if n_samples == 1:
data = data[0:1, ...]
data = [data[i, ...] for i in range(data.shape[0])]
return statistic(*data, axis=axis)
data = [data[i, ...] for i in range(data.shape[0])]
return _calculate_null_pairings(data, statistic_wrapped, n_permutations,
batch, rng, xp=xp)
def _permutation_test_iv(data, statistic, permutation_type, vectorized,
n_resamples, batch, alternative, axis, rng):
"""Input validation for `permutation_test`."""
axis_int = int(axis)
if axis != axis_int:
raise ValueError("`axis` must be an integer.")
permutation_types = {'samples', 'pairings', 'independent'}
permutation_type = permutation_type.lower()
if permutation_type not in permutation_types:
raise ValueError(f"`permutation_type` must be in {permutation_types}.")
if vectorized not in {True, False, None}:
raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
if vectorized is None:
vectorized = 'axis' in inspect.signature(statistic).parameters
message = "`data` must be a tuple containing at least two samples"
try:
if len(data) < 2 and permutation_type == 'independent':
raise ValueError(message)
except TypeError:
raise TypeError(message)
xp = array_namespace(*data)
if not vectorized:
if not is_numpy(xp):
message = (f"When using array library {xp.__name__}, `func` must be "
"vectorized and accept argument `axis`.")
raise TypeError(message)
statistic = _vectorize_statistic(statistic)
data = _broadcast_arrays(data, axis, xp=xp)
data_iv = []
for sample in data:
sample = xpx.atleast_nd(sample, ndim=1)
if sample.shape[axis] <= 1:
raise ValueError("each sample in `data` must contain two or more "
"observations along `axis`.")
sample = xp.moveaxis(sample, axis_int, -1)
data_iv.append(sample)
n_resamples_int = (int(n_resamples) if not math.isinf(n_resamples)
else xp.inf)
if n_resamples != n_resamples_int or n_resamples_int <= 0:
raise ValueError("`n_resamples` must be a positive integer.")
if batch is None:
batch_iv = batch
else:
batch_iv = int(batch)
if batch != batch_iv or batch_iv <= 0:
raise ValueError("`batch` must be a positive integer or None.")
alternatives = {'two-sided', 'greater', 'less'}
alternative = alternative.lower()
if alternative not in alternatives:
raise ValueError(f"`alternative` must be in {alternatives}")
rng = check_random_state(rng)
float_dtype = xp_result_type(*data_iv, force_floating=True, xp=xp)
return (data_iv, statistic, permutation_type, vectorized, n_resamples_int,
batch_iv, alternative, axis_int, rng, float_dtype, xp)
@xp_capabilities(skip_backends=[('dask.array', "lacks required indexing capabilities")])
@_transition_to_rng('random_state')
def permutation_test(data, statistic, *, permutation_type='independent',
                     vectorized=None, n_resamples=9999, batch=None,
                     alternative="two-sided", axis=0, rng=None):
    r"""
    Performs a permutation test of a given statistic on provided data.
    For independent sample statistics, the null hypothesis is that the data are
    randomly sampled from the same distribution.
    For paired sample statistics, two null hypothesis can be tested:
    that the data are paired at random or that the data are assigned to samples
    at random.
    Parameters
    ----------
    data : iterable of array-like
        Contains the samples, each of which is an array of observations.
        Dimensions of sample arrays must be compatible for broadcasting except
        along `axis`.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts samples
        as separate arguments (e.g. ``statistic(*data)``) and returns the
        resulting statistic.
        If `vectorized` is set ``True``, `statistic` must also accept a keyword
        argument `axis` and be vectorized to compute the statistic along the
        provided `axis` of the sample arrays.
    permutation_type : {'independent', 'samples', 'pairings'}, optional
        The type of permutations to be performed, in accordance with the
        null hypothesis. The first two permutation types are for paired sample
        statistics, in which all samples contain the same number of
        observations and observations with corresponding indices along `axis`
        are considered to be paired; the third is for independent sample
        statistics.
        - ``'samples'`` : observations are assigned to different samples
          but remain paired with the same observations from other samples.
          This permutation type is appropriate for paired sample hypothesis
          tests such as the Wilcoxon signed-rank test and the paired t-test.
        - ``'pairings'`` : observations are paired with different observations,
          but they remain within the same sample. This permutation type is
          appropriate for association/correlation tests with statistics such
          as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's
          :math:`r`.
        - ``'independent'`` (default) : observations are assigned to different
          samples. Samples may contain different numbers of observations. This
          permutation type is appropriate for independent sample hypothesis
          tests such as the Mann-Whitney :math:`U` test and the independent
          sample t-test.
        Please see the Notes section below for more detailed descriptions
        of the permutation types.
    vectorized : bool, optional
        If `vectorized` is set ``False``, `statistic` will not be passed
        keyword argument `axis` and is expected to calculate the statistic
        only for 1D samples. If ``True``, `statistic` will be passed keyword
        argument `axis` and is expected to calculate the statistic along `axis`
        when passed an ND sample array. If ``None`` (default), `vectorized`
        will be set ``True`` if ``axis`` is a parameter of `statistic`. Use
        of a vectorized statistic typically reduces computation time.
    n_resamples : int or np.inf, default: 9999
        Number of random permutations (resamples) used to approximate the null
        distribution. If greater than or equal to the number of distinct
        permutations, the exact null distribution will be computed.
        Note that the number of distinct permutations grows very rapidly with
        the sizes of samples, so exact tests are feasible only for very small
        data sets.
    batch : int, optional
        The number of permutations to process in each call to `statistic`.
        Memory usage is O( `batch` * ``n`` ), where ``n`` is the total size
        of all samples, regardless of the value of `vectorized`. Default is
        ``None``, in which case ``batch`` is the number of permutations.
    alternative : {'two-sided', 'less', 'greater'}, optional
        The alternative hypothesis for which the p-value is calculated.
        For each alternative, the p-value is defined for exact tests as
        follows.
        - ``'greater'`` : the percentage of the null distribution that is
          greater than or equal to the observed value of the test statistic.
        - ``'less'`` : the percentage of the null distribution that is
          less than or equal to the observed value of the test statistic.
        - ``'two-sided'`` (default) : twice the smaller of the p-values above.
        Note that p-values for randomized tests are calculated according to the
        conservative (over-estimated) approximation suggested in [2]_ and [3]_
        rather than the unbiased estimator suggested in [4]_. That is, when
        calculating the proportion of the randomized null distribution that is
        as extreme as the observed value of the test statistic, the values in
        the numerator and denominator are both increased by one. An
        interpretation of this adjustment is that the observed value of the
        test statistic is always included as an element of the randomized
        null distribution.
        The convention used for two-sided p-values is not universal;
        the observed test statistic and null distribution are returned in
        case a different definition is preferred.
    axis : int, default: 0
        The axis of the (broadcasted) samples over which to calculate the
        statistic. If samples have a different number of dimensions,
        singleton dimensions are prepended to samples with fewer dimensions
        before `axis` is considered.
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system. Types other than `numpy.random.Generator` are
        passed to `numpy.random.default_rng` to instantiate a ``Generator``.
    Returns
    -------
    res : PermutationTestResult
        An object with attributes:
        statistic : float or ndarray
            The observed test statistic of the data.
        pvalue : float or ndarray
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.
    Notes
    -----
    The three types of permutation tests supported by this function are
    described below.
    **Unpaired statistics** (``permutation_type='independent'``):
    The null hypothesis associated with this permutation type is that all
    observations are sampled from the same underlying distribution and that
    they have been assigned to one of the samples at random.
    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
    When ``1 < n_resamples < binom(n, k)``, where
    * ``k`` is the number of observations in ``a``,
    * ``n`` is the total number of observations in ``a`` and ``b``, and
    * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
    the data are pooled (concatenated), randomly assigned to either the first
    or second sample, and the statistic is calculated. This process is
    performed repeatedly, `n_resamples` times, generating a distribution of
    the statistic under the null hypothesis. The statistic of the original
    data is compared to this distribution to determine the p-value.
    When ``n_resamples >= binom(n, k)``, an exact test is performed: the data
    are *partitioned* between the samples in each distinct way exactly once,
    and the exact null distribution is formed.
    Note that for a given partitioning of the data between the samples,
    only one ordering/permutation of the data *within* each sample is
    considered. For statistics that do not depend on the order of the data
    within samples, this dramatically reduces computational cost without
    affecting the shape of the null distribution (because the frequency/count
    of each value is affected by the same factor).
    For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``.
    Because only one ordering/permutation of the data *within* each sample
    is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]``
    and ``y = [a4, a3, b1]`` would *not* be considered distinct from the
    example above.
    ``permutation_type='independent'`` does not support one-sample statistics,
    but it can be applied to statistics with more than two samples. In this
    case, if ``n`` is an array of the number of observations within each
    sample, the number of distinct partitions is::
        np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)])
    **Paired statistics, permute pairings** (``permutation_type='pairings'``):
    The null hypothesis associated with this permutation type is that
    observations within each sample are drawn from the same underlying
    distribution and that pairings with elements of other samples are
    assigned at random.
    Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we
    wish to consider all possible pairings of elements of ``a`` with elements
    of a second sample, ``b``. Let ``n`` be the number of observations in
    ``a``, which must also equal the number of observations in ``b``.
    When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are
    randomly permuted. The user-supplied statistic accepts one data argument,
    say ``a_perm``, and calculates the statistic considering ``a_perm`` and
    ``b``. This process is performed repeatedly, `n_resamples` times,
    generating a distribution of the statistic under the null hypothesis.
    The statistic of the original data is compared to this distribution to
    determine the p-value.
    When ``n_resamples >= factorial(n)``, an exact test is performed:
    ``a`` is permuted in each distinct way exactly once. Therefore, the
    `statistic` is computed for each unique pairing of samples between ``a``
    and ``b`` exactly once.
    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left
    in its original order.
    ``permutation_type='pairings'`` supports ``data`` containing any number
    of samples, each of which must contain the same number of observations.
    All samples provided in ``data`` are permuted *independently*. Therefore,
    if ``m`` is the number of samples and ``n`` is the number of observations
    within each sample, then the number of permutations in an exact test is::
        factorial(n)**m
    Note that if a two-sample statistic, for example, does not inherently
    depend on the order in which observations are provided - only on the
    *pairings* of observations - then only one of the two samples should be
    provided in ``data``. This dramatically reduces computational cost without
    affecting the shape of the null distribution (because the frequency/count
    of each value is affected by the same factor).
    **Paired statistics, permute samples** (``permutation_type='samples'``):
    The null hypothesis associated with this permutation type is that
    observations within each pair are drawn from the same underlying
    distribution and that the sample to which they are assigned is random.
    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
    Let ``n`` be the number of observations in ``a``, which must also equal
    the number of observations in ``b``.
    When ``1 < n_resamples < 2**n``, the elements of ``a`` and ``b`` are
    randomly swapped between samples (maintaining their pairings) and the
    statistic is calculated. This process is performed repeatedly,
    `n_resamples` times, generating a distribution of the statistic under the
    null hypothesis. The statistic of the original data is compared to this
    distribution to determine the p-value.
    When ``n_resamples >= 2**n``, an exact test is performed: the observations
    are assigned to the two samples in each distinct way (while maintaining
    pairings) exactly once.
    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``.
    ``permutation_type='samples'`` supports ``data`` containing any number
    of samples, each of which must contain the same number of observations.
    If ``data`` contains more than one sample, paired observations within
    ``data`` are exchanged between samples *independently*. Therefore, if ``m``
    is the number of samples and ``n`` is the number of observations within
    each sample, then the number of permutations in an exact test is::
        factorial(m)**n
    Several paired-sample statistical tests, such as the Wilcoxon signed rank
    test and paired-sample t-test, can be performed considering only the
    *difference* between two paired elements. Accordingly, if ``data`` contains
    only one sample, then the null distribution is formed by independently
    changing the *sign* of each observation.
    .. warning::
        The p-value is calculated by counting the elements of the null
        distribution that are as extreme or more extreme than the observed
        value of the statistic. Due to the use of finite precision arithmetic,
        some statistic functions return numerically distinct values when the
        theoretical values would be exactly equal. In some cases, this could
        lead to a large error in the calculated p-value. `permutation_test`
        guards against this by considering elements in the null distribution
        that are "close" (within a relative tolerance of 100 times the
        floating point epsilon of inexact dtypes) to the observed
        value of the test statistic as equal to the observed value of the
        test statistic. However, the user is advised to inspect the null
        distribution to assess whether this method of comparison is
        appropriate, and if not, calculate the p-value manually. See example
        below.
    References
    ----------
    .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951).
    .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
           Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
           Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
    .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference".
           Statistical Science (2004).
    .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap
           (1993).
    Examples
    --------
    Suppose we wish to test whether two samples are drawn from the same
    distribution. Assume that the underlying distributions are unknown to us,
    and that before observing the data, we hypothesized that the mean of the
    first sample would be less than that of the second sample. We decide that
    we will use the difference between the sample means as a test statistic,
    and we will consider a p-value of 0.05 to be statistically significant.
    For efficiency, we write the function defining the test statistic in a
    vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the
    statistic will be calculated for each axis-slice along `axis`.
    >>> import numpy as np
    >>> def statistic(x, y, axis):
    ...     return np.mean(x, axis=axis) - np.mean(y, axis=axis)
    After collecting our data, we calculate the observed value of the test
    statistic.
    >>> from scipy.stats import norm
    >>> rng = np.random.default_rng()
    >>> x = norm.rvs(size=5, random_state=rng)
    >>> y = norm.rvs(size=6, loc = 3, random_state=rng)
    >>> statistic(x, y, 0)
    -3.5411688580987266
    Indeed, the test statistic is negative, suggesting that the true mean of
    the distribution underlying ``x`` is less than that of the distribution
    underlying ``y``. To determine the probability of this occurring by chance
    if the two samples were drawn from the same distribution, we perform
    a permutation test.
    >>> from scipy.stats import permutation_test
    >>> # because our statistic is vectorized, we pass `vectorized=True`
    >>> # `n_resamples=np.inf` indicates that an exact test is to be performed
    >>> res = permutation_test((x, y), statistic, vectorized=True,
    ...                        n_resamples=np.inf, alternative='less')
    >>> print(res.statistic)
    -3.5411688580987266
    >>> print(res.pvalue)
    0.004329004329004329
    The probability of obtaining a test statistic less than or equal to the
    observed value under the null hypothesis is 0.4329%. This is less than our
    chosen threshold of 5%, so we consider this to be significant evidence
    against the null hypothesis in favor of the alternative.
    Because the size of the samples above was small, `permutation_test` could
    perform an exact test. For larger samples, we resort to a randomized
    permutation test.
    >>> x = norm.rvs(size=100, random_state=rng)
    >>> y = norm.rvs(size=120, loc=0.2, random_state=rng)
    >>> res = permutation_test((x, y), statistic, n_resamples=9999,
    ...                        vectorized=True, alternative='less',
    ...                        rng=rng)
    >>> print(res.statistic)
    -0.4230459671240913
    >>> print(res.pvalue)
    0.0015
    The approximate probability of obtaining a test statistic less than or
    equal to the observed value under the null hypothesis is 0.15%. This is
    again less than our chosen threshold of 5%, so again we have significant
    evidence to reject the null hypothesis in favor of the alternative.
    For large samples and number of permutations, the result is comparable to
    that of the corresponding asymptotic test, the independent sample t-test.
    >>> from scipy.stats import ttest_ind
    >>> res_asymptotic = ttest_ind(x, y, alternative='less')
    >>> print(res_asymptotic.pvalue)
    0.0014669545224902675
    The permutation distribution of the test statistic is provided for
    further investigation.
    >>> import matplotlib.pyplot as plt
    >>> plt.hist(res.null_distribution, bins=50)
    >>> plt.title("Permutation distribution of test statistic")
    >>> plt.xlabel("Value of Statistic")
    >>> plt.ylabel("Frequency")
    >>> plt.show()
    Inspection of the null distribution is essential if the statistic suffers
    from inaccuracy due to limited machine precision. Consider the following
    case:
    >>> from scipy.stats import pearsonr
    >>> x = [1, 2, 4, 3]
    >>> y = [2, 4, 6, 8]
    >>> def statistic(x, y, axis=-1):
    ...     return pearsonr(x, y, axis=axis).statistic
    >>> res = permutation_test((x, y), statistic, vectorized=True,
    ...                        permutation_type='pairings',
    ...                        alternative='greater')
    >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution
    In this case, some elements of the null distribution differ from the
    observed value of the correlation coefficient ``r`` due to numerical noise.
    We manually inspect the elements of the null distribution that are nearly
    the same as the observed value of the test statistic.
    >>> r
    0.7999999999999999
    >>> unique = np.unique(null)
    >>> unique
    array([-1. , -1. , -0.8, -0.8, -0.8, -0.6, -0.4, -0.4, -0.2, -0.2, -0.2,
            0. ,  0.2,  0.2,  0.2,  0.4,  0.4,  0.6,  0.8,  0.8,  0.8,  1. ,
            1. ])  # may vary
    >>> unique[np.isclose(r, unique)].tolist()
    [0.7999999999999998, 0.7999999999999999, 0.8]  # may vary
    If `permutation_test` were to perform the comparison naively, the
    elements of the null distribution with value ``0.7999999999999998`` would
    not be considered as extreme or more extreme as the observed value of the
    statistic, so the calculated p-value would be too small.
    >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null)
    >>> incorrect_pvalue
    0.14583333333333334  # may vary
    Instead, `permutation_test` treats elements of the null distribution that
    are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the
    statistic ``r`` to be equal to ``r``.
    >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null)
    >>> correct_pvalue
    0.16666666666666666
    >>> res.pvalue == correct_pvalue
    True
    This method of comparison is expected to be accurate in most practical
    situations, but the user is advised to assess this by inspecting the
    elements of the null distribution that are close to the observed value
    of the statistic. Also, consider the use of statistics that can be
    calculated using exact arithmetic (e.g. integer statistics).
    """
    # Validate/canonicalize all inputs; also resolves the array namespace `xp`
    # and the floating dtype used for p-values.
    args = _permutation_test_iv(data, statistic, permutation_type, vectorized,
                                n_resamples, batch, alternative, axis,
                                rng)
    (data, statistic, permutation_type, vectorized, n_resamples, batch,
     alternative, axis, rng, float_dtype, xp) = args
    # `_permutation_test_iv` moved the user-specified axis to the last
    # position, so the observed statistic is computed along axis=-1.
    observed = statistic(*data, axis=-1)
    # Dispatch to the null-distribution generator matching the permutation
    # type (see the Notes section of the docstring for their definitions).
    null_calculators = {"pairings": _calculate_null_pairings,
                        "samples": _calculate_null_samples,
                        "independent": _calculate_null_both}
    null_calculator_args = (data, statistic, n_resamples,
                            batch, rng)
    calculate_null = null_calculators[permutation_type]
    null_distribution, n_resamples, exact_test = (
        calculate_null(*null_calculator_args, xp=xp))
    # See References [2] and [3]: for randomized (non-exact) tests, add 1 to
    # both numerator and denominator so the observed statistic is effectively
    # counted as a member of the null distribution (conservative p-value).
    adjustment = 0 if exact_test else 1
    # relative tolerance for detecting numerically distinct but
    # theoretically equal values in the null distribution
    eps = (0 if not xp.isdtype(observed.dtype, 'real floating')
           else xp.finfo(observed.dtype).eps*100)
    # `gamma` is the absolute comparison tolerance derived from the relative
    # tolerance `eps` and the magnitude of the observed statistic.
    gamma = xp.abs(eps * observed)
    def less(null_distribution, observed):
        # Proportion of null values <= observed (within tolerance `gamma`).
        cmps = null_distribution <= observed + gamma
        count = xp.count_nonzero(cmps, axis=0) + adjustment
        pvalues = xp.astype(count, float_dtype) / (n_resamples + adjustment)
        return pvalues
    def greater(null_distribution, observed):
        # Proportion of null values >= observed (within tolerance `gamma`).
        cmps = null_distribution >= observed - gamma
        count = xp.count_nonzero(cmps, axis=0) + adjustment
        pvalues = xp.astype(count, float_dtype) / (n_resamples + adjustment)
        return pvalues
    def two_sided(null_distribution, observed):
        # Twice the smaller one-sided p-value, per the docstring convention.
        pvalues_less = less(null_distribution, observed)
        pvalues_greater = greater(null_distribution, observed)
        pvalues = xp.minimum(pvalues_less, pvalues_greater) * 2
        return pvalues
    compare = {"less": less,
               "greater": greater,
               "two-sided": two_sided}
    pvalues = compare[alternative](null_distribution, observed)
    # The two-sided doubling can exceed 1; clamp to the valid range [0, 1].
    pvalues = xp.clip(pvalues, 0., 1.)
    return PermutationTestResult(observed, pvalues, null_distribution)
@dataclass
| PermutationTestResult |
python | apache__airflow | providers/opensearch/src/airflow/providers/opensearch/operators/opensearch.py | {
"start": 1523,
"end": 4389
} | class ____(BaseOperator):
"""
Run a query search against a given index on an OpenSearch cluster and returns results.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpenSearchQueryOperator`
:param query: A Dictionary OpenSearch DSL query.
:param search_object: A Search object from opensearch-dsl.
:param index_name: The name of the index to search for documents.
:param opensearch_conn_id: opensearch connection to use
:param opensearch_conn_class: opensearch connection class to use
:param log_query: Whether to log the query used. Defaults to True and logs query used.
"""
template_fields: Sequence[str] = ["query"]
def __init__(
self,
*,
query: dict | None = None,
search_object: Any | None = None,
index_name: str | None = None,
opensearch_conn_id: str = "opensearch_default",
opensearch_conn_class: type[OpenSearchConnectionClass] | None = RequestsHttpConnection,
log_query: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query = query
self.index_name = index_name
self.opensearch_conn_id = opensearch_conn_id
self.opensearch_conn_class = opensearch_conn_class
self.log_query = log_query
self.search_object = search_object
@cached_property
def hook(self) -> OpenSearchHook:
"""Get an instance of an OpenSearchHook."""
return OpenSearchHook(
open_search_conn_id=self.opensearch_conn_id,
open_search_conn_class=self.opensearch_conn_class,
log_query=self.log_query,
)
def execute(self, context: Context) -> Any:
"""Execute a search against a given index or a Search object on an OpenSearch Cluster."""
result = None
if self.query is not None:
if not self.query.get("query"):
raise AirflowException("Query input is missing required field Query in dictionary")
if self.index_name is None:
raise AirflowException("Index name is required when using the query input.")
try:
result = self.hook.search(index_name=self.index_name, query=self.query)
except OpenSearchException as e:
raise AirflowException(e)
elif self.search_object is not None:
try:
result = self.search_object.using(self.hook.client).execute()
except OpenSearchException as e:
raise AirflowException(e)
else:
raise AirflowException(
"""Input missing required input of query or search_object.
Either query or search_object is required."""
)
return result
| OpenSearchQueryOperator |
python | huggingface__transformers | src/transformers/models/xlm/tokenization_xlm.py | {
"start": 4395,
"end": 23337
} | class ____(PreTrainedTokenizer):
"""
Construct an XLM tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization for most supported languages.
- Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP).
- Optionally lowercases and normalizes all inputs text.
- The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
"__classify__") to a vocabulary.
- The `lang2id` attribute maps the languages supported by the model with their IDs if provided (automatically set
for pretrained vocabularies).
- The `id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Vocabulary file.
merges_file (`str`):
Merges file.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"</s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"<special1>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
List of additional special tokens.
lang2id (`Dict[str, int]`, *optional*):
Dictionary mapping languages string identifiers to their IDs.
id2lang (`Dict[int, str]`, *optional*):
Dictionary mapping language IDs to their string identifiers.
do_lowercase_and_remove_accent (`bool`, *optional*, defaults to `True`):
Whether to lowercase and remove accents when tokenizing.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file,
merges_file,
unk_token="<unk>",
bos_token="<s>",
sep_token="</s>",
pad_token="<pad>",
cls_token="</s>",
mask_token="<special1>",
additional_special_tokens=[
"<special0>",
"<special1>",
"<special2>",
"<special3>",
"<special4>",
"<special5>",
"<special6>",
"<special7>",
"<special8>",
"<special9>",
],
lang2id=None,
id2lang=None,
do_lowercase_and_remove_accent=True,
**kwargs,
):
try:
import sacremoses
except ImportError:
raise ImportError(
"You need to install sacremoses to use XLMTokenizer. "
"See https://pypi.org/project/sacremoses/ for installation."
)
self.sm = sacremoses
# cache of sm.MosesPunctNormalizer instance
self.cache_moses_punct_normalizer = {}
# cache of sm.MosesTokenizer instance
self.cache_moses_tokenizer = {}
self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
# True for current supported model (v1.2.0), False for XLM-17 & 100
self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
self.lang2id = lang2id
self.id2lang = id2lang
if lang2id is not None and id2lang is not None:
assert len(lang2id) == len(id2lang)
self.ja_word_tokenizer = None
self.zh_word_tokenizer = None
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
lang2id=lang2id,
id2lang=id2lang,
do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
**kwargs,
)
@property
def do_lower_case(self):
return self.do_lowercase_and_remove_accent
def moses_punct_norm(self, text, lang):
if lang not in self.cache_moses_punct_normalizer:
punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
else:
punct_normalizer = self.cache_moses_punct_normalizer[lang]
return punct_normalizer.normalize(text)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
else:
moses_tokenizer = self.cache_moses_tokenizer[lang]
return moses_tokenizer.tokenize(text, return_str=False, escape=False)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
def ja_tokenize(self, text):
if self.ja_word_tokenizer is None:
try:
import Mykytea
self.ja_word_tokenizer = Mykytea.Mykytea(
f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
)
except (AttributeError, ImportError):
logger.error(
"Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
" (https://github.com/chezou/Mykytea-python) with the following steps"
)
logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
logger.error("2. autoreconf -i")
logger.error("3. ./configure --prefix=$HOME/local")
logger.error("4. make && make install")
logger.error("5. pip install kytea")
raise
return list(self.ja_word_tokenizer.getWS(text))
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + "</w>",)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
if word == "\n </w>":
word = "\n</w>"
self.cache[token] = word
return word
def _tokenize(self, text, lang="en", bypass_tokenizer=False):
"""
Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizer.
Otherwise, we use Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
- [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer
- Install with `pip install pythainlp`
- [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of
[KyTea](https://github.com/neubig/kytea)
- Install with the following steps:
::
git clone git@github.com:neubig/kytea.git && cd kytea autoreconf -i ./configure --prefix=$HOME/local
make && make install pip install kytea
- [rjieba](https://github.com/messense/rjieba-py): Chinese tokenizer (*)
- Install with `pip install rjieba`
(*) The original XLM used [Stanford
Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip). However, the wrapper
(`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated. Jieba is a lot
faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine if you
fine-tune the model with Chinese supervisionself. If you want the same exact behaviour, use the original XLM
[preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence
externally, and set `bypass_tokenizer=True` to bypass the tokenizer.
Args:
- lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
languages. However, we don't enforce it.
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
(bool). If True, we only apply BPE.
Returns:
List of tokens.
"""
if lang and self.lang2id and lang not in self.lang2id:
logger.error(
"Supplied language code not found in lang2id mapping. Please check that your language is supported by"
" the loaded pretrained model."
)
if bypass_tokenizer:
text = text.split()
elif lang not in self.lang_with_custom_tokenizer:
text = self.moses_pipeline(text, lang=lang)
# TODO: make sure we are using `FacebookAI/xlm-mlm-enro-1024`, since XLM-100 doesn't have this step
if lang == "ro":
text = romanian_preprocessing(text)
text = self.moses_tokenize(text, lang=lang)
elif lang == "th":
text = self.moses_pipeline(text, lang=lang)
try:
if "pythainlp" not in sys.modules:
from pythainlp.tokenize import word_tokenize as th_word_tokenize
else:
th_word_tokenize = sys.modules["pythainlp"].word_tokenize
except (AttributeError, ImportError):
logger.error(
"Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps"
)
logger.error("1. pip install pythainlp")
raise
text = th_word_tokenize(text)
elif lang == "zh":
try:
if "rjieba" not in sys.modules:
import rjieba
else:
rjieba = sys.modules["rjieba"]
except (AttributeError, ImportError):
logger.error(
"Make sure you install rjieba (https://github.com/messense/rjieba-py) with the following steps"
)
logger.error("1. pip install rjieba")
raise
text = " ".join(rjieba.cut(text))
text = self.moses_pipeline(text, lang=lang)
text = text.split()
elif lang == "ja":
text = self.moses_pipeline(text, lang=lang)
text = self.ja_tokenize(text)
else:
raise ValueError("It should not reach here")
if self.do_lowercase_and_remove_accent and not bypass_tokenizer:
text = lowercase_and_remove_accent(text)
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = "".join(tokens).replace("</w>", " ").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
bos = [self.bos_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return bos + token_ids_0 + sep
return bos + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def __getstate__(self):
state = self.__dict__.copy()
state["sm"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sacremoses
except ImportError:
raise ImportError(
"You need to install sacremoses to use XLMTokenizer. "
"See https://pypi.org/project/sacremoses/ for installation."
)
self.sm = sacremoses
__all__ = ["XLMTokenizer"]
| XLMTokenizer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 13563,
"end": 14001
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "ResourceNotFoundError"
resource_name = graphene.NonNull(graphene.String)
def __init__(self, resource_name):
super().__init__()
self.resource_name = check.str_param(resource_name, "resource_name")
self.message = f"Top-level resource {self.resource_name} could not be found."
| GrapheneResourceNotFoundError |
python | jupyterlab__jupyterlab | examples/filebrowser/main.py | {
"start": 569,
"end": 1466
} | class ____(LabServerApp):
extension_url = "/example"
default_url = "/example"
app_url = "/example"
load_other_extensions = False
name = __name__
app_name = "JupyterLab Example File Browser"
static_dir = os.path.join(HERE, "build")
templates_dir = os.path.join(HERE, "templates")
app_version = version
app_settings_dir = os.path.join(HERE, "build", "application_settings")
schemas_dir = os.path.join(HERE, "build", "schemas")
themes_dir = os.path.join(HERE, "build", "themes")
user_settings_dir = os.path.join(HERE, "build", "user_settings")
workspaces_dir = os.path.join(HERE, "build", "workspaces")
def initialize_settings(self):
super().initialize_settings()
settings = self.serverapp.web_app.settings
settings["terminals_available"] = False
if __name__ == "__main__":
ExampleApp.launch_instance()
| ExampleApp |
python | huggingface__transformers | src/transformers/processing_utils.py | {
"start": 20744,
"end": 23456
} | class ____(TypedDict, total=False):
"""
Keyword arguments for tokenizer's `apply_chat_template`, when it is called from within a processor.
tools (`list[Dict]`, *optional*):
A list of tools (callable functions) that will be accessible to the model. If the template does not
support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
giving the name, description and argument types for the tool. See our
[chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
for more information.
documents (`list[dict[str, str]]`, *optional*):
A list of dicts representing documents that will be accessible to the model if it is performing RAG
(retrieval-augmented generation). If the template does not support RAG, this argument will have no
effect. We recommend that each document should be a dict containing "title" and "text" keys. Please
see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG)
for examples of passing documents with chat templates.
add_generation_prompt (bool, *optional*):
If this is set, a prompt with the token(s) that indicate
the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model.
Note that this argument will be passed to the chat template, and so it must be supported in the
template for this argument to have any effect.
continue_final_message (bool, *optional*):
If this is set, the chat will be formatted so that the final
message in the chat is open-ended, without any EOS tokens. The model will continue this message
rather than starting a new one. This allows you to "prefill" part of
the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
return_assistant_tokens_mask (`bool`, defaults to `False`):
Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant,
the mask will contain 1. For user and system tokens, the mask will contain 0.
This functionality is only available for chat templates that support it via the `{% generation %}` keyword.
"""
tools: Optional[list[dict]] = None
documents: Optional[list[dict[str, str]]] = None
add_generation_prompt: Optional[bool] = False
continue_final_message: Optional[bool] = False
return_assistant_tokens_mask: Optional[bool] = False
| TokenizerChatTemplateKwargs |
python | sympy__sympy | sympy/polys/series/tring.py | {
"start": 984,
"end": 3046
} | class ____(Generic[Er]):
"""Dummy type for lower power series elements, used only for static type checking."""
# Dummy method to ensure that TSeriesElement is invariant in Er.
def __invalid__(self, other: Er) -> Er:
raise NotImplementedError
flint = True
# Types for lower ring power series ring combining different ground types.
TSeriesRing = PowerSeriesRingProto[TSeriesElement[Er], Er]
TSeriesRingField = PowerSeriesRingFieldProto[TSeriesElement[Ef], Ef]
def PowerSeriesRingZZ(prec: int = 6) -> TSeriesRing[MPZ]:
if FlintPowerSeriesRingZZ is None:
R_python: PowerSeriesRingProto[USeries[MPZ], MPZ] = PythonPowerSeriesRingZZ(
prec
)
return cast("TSeriesRing[MPZ]", R_python)
else:
R_flint: PowerSeriesRingProto[ZZSeries, MPZ] = FlintPowerSeriesRingZZ(prec)
return cast("TSeriesRing[MPZ]", R_flint)
def PowerSeriesRingQQ(prec: int = 6) -> TSeriesRingField[MPQ]:
if FlintPowerSeriesRingQQ is None:
R_python: PowerSeriesRingProto[USeries[MPQ], MPQ] = PythonPowerSeriesRingQQ(
prec
)
return cast("TSeriesRingField[MPQ]", R_python)
else:
R_flint: PowerSeriesRingProto[QQSeries, MPQ] = FlintPowerSeriesRingQQ(prec)
return cast("TSeriesRingField[MPQ]", R_flint)
@overload
def _power_series_ring(K: Field[Ef], prec: int = 6) -> TSeriesRingField[Ef]: ...
@overload
def _power_series_ring(K: Domain[Er], prec: int = 6) -> TSeriesRing[Er]: ...
def _power_series_ring(
K: Domain[Er] | Field[Ef], prec: int = 6
) -> TSeriesRing[Er] | TSeriesRingField[Ef]:
"""
Helper function for the Power Series Ring classes to create a base ring from lower
power series rings.
"""
if K.is_ZZ:
R_ZZ: TSeriesRing[MPZ] = PowerSeriesRingZZ(prec)
return cast("TSeriesRing[Er]", R_ZZ)
elif K.is_QQ:
R_QQ: TSeriesRingField[MPQ] = PowerSeriesRingQQ(prec)
return cast("TSeriesRingField[Ef]", R_QQ)
else:
raise ValueError(f"Unsupported ground domain: {K}")
| TSeriesElement |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/footsies/fixed_rlmodules.py | {
"start": 273,
"end": 1037
} | class ____(RLModule):
def _forward_inference(self, batch, **kwargs):
return self._fixed_forward(batch, **kwargs)
def _forward_exploration(self, batch, **kwargs):
return self._fixed_forward(batch, **kwargs)
def _forward_train(self, *args, **kwargs):
raise NotImplementedError(
f"RLlib: {self.__class__.__name__} should not be trained. "
f"It is a fixed RLModule, returning a fixed action for all observations."
)
def _fixed_forward(self, batch, **kwargs):
"""Implements a fixed that always returns the same action."""
raise NotImplementedError(
"FixedRLModule: This method should be overridden by subclasses to implement a specific action."
)
| FixedRLModule |
python | google__pytype | pytype/overlays/named_tuple.py | {
"start": 1097,
"end": 2629
} | class ____:
"""Collection of properties used to construct a namedtuple."""
name: str
fields: list[Field]
bases: list[Any]
@classmethod
def from_field_names(cls, name, field_names, ctx):
"""Make a NamedTupleProperties from field names with no types."""
fields = [Field(n, ctx.convert.unsolvable, None) for n in field_names]
return cls(name, fields, [])
def validate_and_rename_fields(self, rename):
"""Validate and rename self.fields.
namedtuple field names have some requirements:
- must not be a Python keyword
- must consist of only alphanumeric characters and "_"
- must not start with "_" or a digit
Basically, they're valid Python identifiers that don't start with "_" or a
digit. Also, there can be no duplicate field names.
If rename is true, any invalid field names are changed to "_%d". For
example, "abc def ghi abc" becomes "abc _1 def _3" because "def" is a
keyword and "abc" is a duplicate.
Also validates self.name, which has the same requirements, except it can
start with "_", and cannot be changed.
"""
if not utils.is_valid_name(self.name):
raise ValueError(self.name)
seen = set()
for idx, f in enumerate(self.fields):
if (
not utils.is_valid_name(f.name)
or f.name.startswith("_")
or f.name in seen
):
if rename:
f.name = "_%d" % idx
else:
raise ValueError(f.name)
seen.add(f.name)
@dataclasses.dataclass
| NamedTupleProperties |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 454885,
"end": 455121
} | class ____(Response):
"""
Response of tasks.move endpoint.
"""
_service = "tasks"
_action = "move"
_version = "2.23"
_schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
| MoveResponse |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 5826,
"end": 6594
} | class ____(_NumcodecsCodec, ArrayArrayCodec):
def __init__(self, **codec_config: JSON) -> None:
super().__init__(**codec_config)
async def _decode_single(self, chunk_data: NDBuffer, chunk_spec: ArraySpec) -> NDBuffer:
chunk_ndarray = chunk_data.as_ndarray_like()
out = await asyncio.to_thread(self._codec.decode, chunk_ndarray)
return chunk_spec.prototype.nd_buffer.from_ndarray_like(out.reshape(chunk_spec.shape))
async def _encode_single(self, chunk_data: NDBuffer, chunk_spec: ArraySpec) -> NDBuffer:
chunk_ndarray = chunk_data.as_ndarray_like()
out = await asyncio.to_thread(self._codec.encode, chunk_ndarray)
return chunk_spec.prototype.nd_buffer.from_ndarray_like(out)
| _NumcodecsArrayArrayCodec |
python | readthedocs__readthedocs.org | readthedocs/projects/querysets.py | {
"start": 8198,
"end": 8323
} | class ____(RelatedProjectQuerySet):
project_field = "parent"
use_for_related_fields = True
| ParentRelatedProjectQuerySet |
python | wandb__wandb | wandb/vendor/pygments/lexers/modeling.py | {
"start": 3719,
"end": 6911
} | class ____(RegexLexer):
"""
Pygments Lexer for `OpenBugs <http://www.openbugs.net/>`_ and WinBugs
models.
.. versionadded:: 1.6
"""
name = 'BUGS'
aliases = ['bugs', 'winbugs', 'openbugs']
filenames = ['*.bug']
_FUNCTIONS = (
# Scalar functions
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
'trunc',
# Vector functions
'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
'sd', 'sort', 'sum',
# Special
'D', 'I', 'F', 'T', 'C')
""" OpenBUGS built-in functions
From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
This also includes
- T, C, I : Truncation and censoring.
``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
- D : ODE
- F : Functional http://www.openbugs.info/Examples/Functionals.html
"""
_DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
'dmt', 'dwish')
""" OpenBUGS built-in distributions
Functions from
http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
"""
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Built-in Functions
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
Name.Builtin),
# Regular variable names
(r'[A-Za-z][\w.]*', Name),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
# Punctuation
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators
(r'\+|-|\*|/', Operator),
# Block
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r"^\s*model\s*{", text, re.M):
return 0.7
else:
return 0.0
| BugsLexer |
python | huggingface__transformers | src/transformers/models/layoutlm/modeling_layoutlm.py | {
"start": 8243,
"end": 8955
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.align.modeling_align.AlignTextAttention with AlignText->LayoutLM
| LayoutLMSelfOutput |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/client_tests/test_get_run_status.py | {
"start": 2314,
"end": 3382
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_get_run_status(self, graphql_context, graphql_client):
selector = infer_job_selector(graphql_context, "csv_hello_world")
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
"mode": "default",
}
},
)
assert not result.errors
assert result.data
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
start_time = time.time()
while True:
if time.time() - start_time > 30:
raise Exception("Timed out waiting for get_run_status to return SUCCESS")
status = graphql_client.get_run_status(run_id)
if status == DagsterRunStatus.SUCCESS:
break
time.sleep(3)
| TestGetRunStatusWithClient |
python | ray-project__ray | doc/source/serve/doc_code/custom_metrics_autoscaling.py | {
"start": 374,
"end": 1259
} | class ____:
def __init__(self):
self.cpu_usage = 50.0
self.memory_usage = 60.0
def __call__(self) -> str:
time.sleep(0.5)
self.cpu_usage = min(100, self.cpu_usage + 5)
self.memory_usage = min(100, self.memory_usage + 3)
return "Hello, world!"
def record_autoscaling_stats(self) -> Dict[str, float]:
self.cpu_usage = max(20, self.cpu_usage - 2)
self.memory_usage = max(30, self.memory_usage - 1)
return {
"cpu_usage": self.cpu_usage,
"memory_usage": self.memory_usage,
}
# Create the app
app = CustomMetricsDeployment.bind()
# __serve_example_end__
if __name__ == "__main__":
import requests # noqa
serve.run(app)
for _ in range(10):
resp = requests.get("http://localhost:8000/")
assert resp.text == "Hello, world!"
| CustomMetricsDeployment |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/Robot_arm/arm_env.py | {
"start": 3963,
"end": 8473
} | class ____(pyglet.window.Window):
color = {
'background': [1]*3 + [1]
}
fps_display = pyglet.clock.ClockDisplay()
bar_thc = 5
def __init__(self, width, height, arm_info, point_info, point_l, mouse_in):
super(Viewer, self).__init__(width, height, resizable=False, caption='Arm', vsync=False) # vsync=False to not use the monitor FPS
self.set_location(x=80, y=10)
pyglet.gl.glClearColor(*self.color['background'])
self.arm_info = arm_info
self.point_info = point_info
self.mouse_in = mouse_in
self.point_l = point_l
self.center_coord = np.array((min(width, height)/2, ) * 2)
self.batch = pyglet.graphics.Batch()
arm1_box, arm2_box, point_box = [0]*8, [0]*8, [0]*8
c1, c2, c3 = (249, 86, 86)*4, (86, 109, 249)*4, (249, 39, 65)*4
self.point = self.batch.add(4, pyglet.gl.GL_QUADS, None, ('v2f', point_box), ('c3B', c2))
self.arm1 = self.batch.add(4, pyglet.gl.GL_QUADS, None, ('v2f', arm1_box), ('c3B', c1))
self.arm2 = self.batch.add(4, pyglet.gl.GL_QUADS, None, ('v2f', arm2_box), ('c3B', c1))
def render(self):
pyglet.clock.tick()
self._update_arm()
self.switch_to()
self.dispatch_events()
self.dispatch_event('on_draw')
self.flip()
def on_draw(self):
self.clear()
self.batch.draw()
# self.fps_display.draw()
def _update_arm(self):
point_l = self.point_l
point_box = (self.point_info[0] - point_l, self.point_info[1] - point_l,
self.point_info[0] + point_l, self.point_info[1] - point_l,
self.point_info[0] + point_l, self.point_info[1] + point_l,
self.point_info[0] - point_l, self.point_info[1] + point_l)
self.point.vertices = point_box
arm1_coord = (*self.center_coord, *(self.arm_info[0, 2:4])) # (x0, y0, x1, y1)
arm2_coord = (*(self.arm_info[0, 2:4]), *(self.arm_info[1, 2:4])) # (x1, y1, x2, y2)
arm1_thick_rad = np.pi / 2 - self.arm_info[0, 1]
x01, y01 = arm1_coord[0] - np.cos(arm1_thick_rad) * self.bar_thc, arm1_coord[1] + np.sin(
arm1_thick_rad) * self.bar_thc
x02, y02 = arm1_coord[0] + np.cos(arm1_thick_rad) * self.bar_thc, arm1_coord[1] - np.sin(
arm1_thick_rad) * self.bar_thc
x11, y11 = arm1_coord[2] + np.cos(arm1_thick_rad) * self.bar_thc, arm1_coord[3] - np.sin(
arm1_thick_rad) * self.bar_thc
x12, y12 = arm1_coord[2] - np.cos(arm1_thick_rad) * self.bar_thc, arm1_coord[3] + np.sin(
arm1_thick_rad) * self.bar_thc
arm1_box = (x01, y01, x02, y02, x11, y11, x12, y12)
arm2_thick_rad = np.pi / 2 - self.arm_info[1, 1]
x11_, y11_ = arm2_coord[0] + np.cos(arm2_thick_rad) * self.bar_thc, arm2_coord[1] - np.sin(
arm2_thick_rad) * self.bar_thc
x12_, y12_ = arm2_coord[0] - np.cos(arm2_thick_rad) * self.bar_thc, arm2_coord[1] + np.sin(
arm2_thick_rad) * self.bar_thc
x21, y21 = arm2_coord[2] - np.cos(arm2_thick_rad) * self.bar_thc, arm2_coord[3] + np.sin(
arm2_thick_rad) * self.bar_thc
x22, y22 = arm2_coord[2] + np.cos(arm2_thick_rad) * self.bar_thc, arm2_coord[3] - np.sin(
arm2_thick_rad) * self.bar_thc
arm2_box = (x11_, y11_, x12_, y12_, x21, y21, x22, y22)
self.arm1.vertices = arm1_box
self.arm2.vertices = arm2_box
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.UP:
self.arm_info[0, 1] += .1
print(self.arm_info[:, 2:4] - self.point_info)
elif symbol == pyglet.window.key.DOWN:
self.arm_info[0, 1] -= .1
print(self.arm_info[:, 2:4] - self.point_info)
elif symbol == pyglet.window.key.LEFT:
self.arm_info[1, 1] += .1
print(self.arm_info[:, 2:4] - self.point_info)
elif symbol == pyglet.window.key.RIGHT:
self.arm_info[1, 1] -= .1
print(self.arm_info[:, 2:4] - self.point_info)
elif symbol == pyglet.window.key.Q:
pyglet.clock.set_fps_limit(1000)
elif symbol == pyglet.window.key.A:
pyglet.clock.set_fps_limit(30)
def on_mouse_motion(self, x, y, dx, dy):
self.point_info[:] = [x, y]
def on_mouse_enter(self, x, y):
self.mouse_in[0] = True
def on_mouse_leave(self, x, y):
self.mouse_in[0] = False
| Viewer |
python | django-extensions__django-extensions | tests/test_validators.py | {
"start": 260,
"end": 1939
} | class ____(TestCase):
"""Tests for NoControlCharactersValidator."""
def test_should_raise_default_message_and_code_if_value_contains_new_line(self):
self.validator = NoControlCharactersValidator()
value_with_new_line = "test\nvalue"
with self.assertRaises(ValidationError) as cm:
self.validator(value_with_new_line)
self.assertEqual(
cm.exception.message,
"Control Characters like new lines or tabs are not allowed.",
)
self.assertEqual(cm.exception.code, "no_control_characters")
self.assertDictEqual(
cm.exception.params, {"value": value_with_new_line, "whitelist": None}
)
def test_should_raise_custom_message_and_code_if_value_contains_tabs(self):
self.validator = NoControlCharactersValidator(
message="custom message", code="custom code"
)
value_with_tabs = "test\tvalue"
with self.assertRaises(ValidationError) as cm:
self.validator(value_with_tabs)
self.assertEqual(cm.exception.message, "custom message")
self.assertEqual(cm.exception.code, "custom code")
self.assertDictEqual(
cm.exception.params, {"value": value_with_tabs, "whitelist": None}
)
def test_should_not_raise_if_value_contains_characters_which_is_on_whitelist(self):
self.validator = NoControlCharactersValidator(
message="custom message", code="custom code", whitelist=["\n"]
)
value_with_new_line = "test\nvalue"
result = self.validator(value_with_new_line)
self.assertIsNone(result)
| NoControlCharactersValidatorTests |
python | django__django | tests/model_fields/test_textfield.py | {
"start": 131,
"end": 1240
} | class ____(TestCase):
def test_max_length_passed_to_formfield(self):
"""
TextField passes its max_length attribute to form fields created using
their formfield() method.
"""
tf1 = models.TextField()
tf2 = models.TextField(max_length=2345)
self.assertIsNone(tf1.formfield().max_length)
self.assertEqual(2345, tf2.formfield().max_length)
def test_choices_generates_select_widget(self):
"""A TextField with choices uses a Select widget."""
f = models.TextField(choices=[("A", "A"), ("B", "B")])
self.assertIsInstance(f.formfield().widget, forms.Select)
def test_to_python(self):
"""TextField.to_python() should return a string."""
f = models.TextField()
self.assertEqual(f.to_python(1), "1")
def test_lookup_integer_in_textfield(self):
self.assertEqual(Post.objects.filter(body=24).count(), 0)
def test_emoji(self):
p = Post.objects.create(title="Whatever", body="Smile 😀.")
p.refresh_from_db()
self.assertEqual(p.body, "Smile 😀.")
| TextFieldTests |
python | doocs__leetcode | solution/2100-2199/2119.A Number After a Double Reversal/Solution.py | {
"start": 0,
"end": 111
} | class ____:
def isSameAfterReversals(self, num: int) -> bool:
return num == 0 or num % 10 != 0
| Solution |
python | django__django | tests/backends/tests.py | {
"start": 972,
"end": 1948
} | class ____(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc method``, in particular against
fields which clash with strings passed to it (e.g. 'year') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
years = SchoolClass.objects.dates("last_updated", "year")
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
Test the custom ``django_date_extract method``, in particular against
fields which clash with strings passed to it (e.g. 'day') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
classes = SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
| DateQuotingTest |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 6309,
"end": 6563
} | class ____(EllipticCurve):
name = "sect571r1"
key_size = 570
group_order = 0x3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE661CE18FF55987308059B186823851EC7DD9CA1161DE93D5174D66E8382E9BB2FE84E47 # noqa: E501
| SECT571R1 |
python | google__jax | tests/jaxpr_effects_test.py | {
"start": 22607,
"end": 28802
} | class ____(jtu.JaxTestCase):
def test_effects_disallowed_in_cond(self):
def f1(x):
def true_fun(x):
effect_p.bind(effect=foo_effect)
return x
def false_fun(x):
return x
return lax.cond(True, true_fun, false_fun, x)
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f1)(2.)
def test_allowed_effect_in_cond(self):
def f(x):
def true_fun(x):
effect_p.bind(effect=while_effect)
return x
def false_fun(x):
effect_p.bind(effect=while_effect)
return x
return lax.cond(x, true_fun, false_fun, x)
f(2)
def test_allowed_effect_in_cond_jvp(self):
def f(x):
def true_fun(x):
effect_p.bind(effect=while_effect)
return x
def false_fun(x):
effect_p.bind(effect=while_effect)
return x
return lax.cond(True, true_fun, false_fun, x)
# test primal side gets effect
primal_jaxpr = jax.make_jaxpr(lambda x: jax.linearize(f, x)[0])(2.)
self.assertEqual(primal_jaxpr.effects, {while_effect})
# and tangent side does not
_, f_lin = jax.linearize(f, 2.)
lin_jaxpr = f_lin.func.fun.args[0]
self.assertEqual(lin_jaxpr.effects, set())
def test_allowed_effect_in_cond_jvp2(self):
@jax.custom_jvp
def print_tangents(x):
return x
@print_tangents.defjvp
def foo_jvp(primals, tangents):
x, = primals
t, = tangents
# TODO(mattjj,sharadmv): don't require data dependence for jax.linearize!
# effect_p.bind(t, effect=while_effect)
t, = effect_p.bind(t, effect=while_effect) # data dep only on tangents
return x, t
def f(x):
def true_fun(x):
return print_tangents(x)
def false_fun(x):
return print_tangents(x)
return lax.cond(True, true_fun, false_fun, x)
# test primal side does not get effect
primal_jaxpr = jax.make_jaxpr(lambda x: jax.linearize(f, x)[0])(2.)
self.assertEqual(primal_jaxpr.effects, set())
# and tangent side does
_, f_lin = jax.linearize(f, 2.)
lin_jaxpr = f_lin.func.fun.args[0]
self.assertEqual(lin_jaxpr.effects, {while_effect})
def test_allowed_ordered_effect_in_cond(self):
def f(x):
def true_fun(x):
effect_p.bind(effect=while1_effect)
return x
def false_fun(x):
effect_p.bind(effect=while1_effect)
return x
return lax.cond(x, true_fun, false_fun, x)
f(2)
def test_multiple_allowed_ordered_effect_in_cond(self):
def f(x):
def true_fun(x):
effect_p.bind(effect=while1_effect)
effect_p.bind(effect=while2_effect)
return x
def false_fun(x):
effect_p.bind(effect=while1_effect)
effect_p.bind(effect=while2_effect)
return x
return lax.cond(x, true_fun, false_fun, x)
f(2)
def f2(x):
def true_fun(x):
return x
def false_fun(x):
effect_p.bind(effect=foo_effect)
return x
return lax.cond(True, true_fun, false_fun, x)
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f2)(2.)
def test_allowed_effect_in_while_body(self):
def f(x):
def cond_fun(x):
return False
def body_fun(x):
effect_p.bind(effect=while_effect)
return x
return lax.while_loop(cond_fun, body_fun, x)
f(2)
def test_allowed_effect_in_cond_body(self):
def f(x):
def cond_fun(x):
effect_p.bind(effect=while_effect)
return False
def body_fun(x):
return x
return lax.while_loop(cond_fun, body_fun, x)
f(2)
def test_allowed_ordered_effect_in_while_body(self):
def f(x):
def cond_fun(x):
return False
def body_fun(x):
effect_p.bind(effect=while1_effect)
return x
return lax.while_loop(cond_fun, body_fun, x)
f(2)
def test_multiple_allowed_ordered_effect_in_while_body(self):
def f(x):
def cond_fun(x):
return False
def body_fun(x):
effect_p.bind(effect=while1_effect)
effect_p.bind(effect=while2_effect)
return x
return lax.while_loop(cond_fun, body_fun, x)
f(2)
def test_effects_disallowed_in_while(self):
def f1(x):
def cond_fun(x):
effect_p.bind(effect=foo_effect)
return False
def body_fun(x):
return x
return lax.while_loop(cond_fun, body_fun, x)
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f1)(2.)
def f2(x):
def cond_fun(x):
return False
def body_fun(x):
effect_p.bind(effect=foo_effect)
return x
return lax.while_loop(cond_fun, body_fun, x)
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f2)(2.)
def test_allowed_effect_in_scan(self):
def f(x):
def body_fun(carry, x):
effect_p.bind(effect=while_effect)
return carry, x
return lax.scan(body_fun, x, jnp.arange(5))
f(2)
def test_allowed_ordered_effect_in_scan(self):
def f(x):
def body_fun(carry, x):
effect_p.bind(effect=while1_effect)
return carry, x
return lax.scan(body_fun, x, jnp.arange(5))
f(2)
def test_multiple_allowed_ordered_effect_in_scan(self):
def f(x):
def body_fun(carry, x):
effect_p.bind(effect=while1_effect)
effect_p.bind(effect=while2_effect)
return carry, x
return lax.scan(body_fun, x, jnp.arange(5))
f(2)
def test_effects_disallowed_in_scan(self):
def f(x):
def body(carry, x):
effect_p.bind(effect=foo_effect)
return carry, x
return lax.scan(body, x, jnp.arange(4))
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f)(2.)
input_effect_p = core.Primitive('input_effect')
input_effect_p.multiple_results = True
input_effect = input_effect_p.bind
def _input_effect_abstract_eval(*avals, index):
return [], {InputEffect(index)}
input_effect_p.def_effectful_abstract_eval(_input_effect_abstract_eval)
| ControlFlowEffectsTest |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-brave-search/llama_index/tools/brave_search/base.py | {
"start": 247,
"end": 1967
} | class ____(BaseToolSpec):
"""
Brave Search tool spec.
"""
spec_functions = ["brave_search"]
def __init__(self, api_key: str) -> None:
"""
Initialize with parameters.
"""
self.api_key = api_key
def _make_request(self, params: Dict) -> requests.Response:
"""
Make a request to the Brave Search API.
Args:
params (dict): The parameters to be passed to the API.
Returns:
requests.Response: The response from the API.
"""
headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip",
"X-Subscription-Token": self.api_key,
}
url = SEARCH_URL_TMPL.format(params=urllib.parse.urlencode(params))
response = requests.get(url, headers=headers)
response.raise_for_status()
return response
def brave_search(
self, query: str, search_lang: str = "en", num_results: int = 5
) -> [Document]:
"""
Make a query to the Brave Search engine to receive a list of results.
Args:
query (str): The query to be passed to Brave Search.
search_lang (str): The search language preference (ISO 639-1), default is "en".
num_results (int): The number of search results returned in response, default is 5.
Returns:
[Document]: A list of documents containing search results.
"""
search_params = {
"q": query,
"search_lang": search_lang,
"count": num_results,
}
response = self._make_request(search_params)
return [Document(text=response.text)]
| BraveSearchToolSpec |
python | encode__django-rest-framework | tests/test_serializer_nested.py | {
"start": 271,
"end": 1571
} | class ____:
def setup_method(self):
class NestedSerializer(serializers.Serializer):
one = serializers.IntegerField(max_value=10)
two = serializers.IntegerField(max_value=10)
class TestSerializer(serializers.Serializer):
nested = NestedSerializer()
self.Serializer = TestSerializer
def test_nested_validate(self):
input_data = {
'nested': {
'one': '1',
'two': '2',
}
}
expected_data = {
'nested': {
'one': 1,
'two': 2,
}
}
serializer = self.Serializer(data=input_data)
assert serializer.is_valid()
assert serializer.validated_data == expected_data
def test_nested_serialize_empty(self):
expected_data = {
'nested': {
'one': None,
'two': None
}
}
serializer = self.Serializer()
assert serializer.data == expected_data
def test_nested_serialize_no_data(self):
data = None
serializer = self.Serializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {'non_field_errors': ['No data provided']}
| TestNestedSerializer |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 60364,
"end": 60633
} | class ____(_ConfigBase):
virtual_per_physical: int
desired_count: int
actual_count: int
desired_virtual_count: int
actual_virtual_count: int
key: str
strategy: str
function: str
ShardingConfig = _ShardingConfig
@dataclass
| _ShardingConfig |
python | tensorflow__tensorflow | tensorflow/python/framework/function.py | {
"start": 23530,
"end": 26841
} | class ____(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
func_name: The function name. Defaults to None, in which derives from
'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type) for _ in defined._signature.output_arg # pylint: disable=protected-access
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(input_types +
output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, tensor_lib.Tensor):
raise ValueError(f"Expected a Tensor but got {x} with type {type(x)}.")
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
| _OverloadedFunction |
python | django-haystack__django-haystack | haystack/inputs.py | {
"start": 3422,
"end": 4452
} | class ____(BaseInput):
"""
If the engine supports it, this input type allows for submitting a query
that uses a different parser.
"""
input_type_name = "alt_parser"
post_process = False
use_parens = False
def __init__(self, parser_name, query_string="", **kwargs):
self.parser_name = parser_name
self.query_string = query_string
self.kwargs = kwargs
def __repr__(self):
return "<%s '%s' '%s' '%s'>" % (
self.__class__.__name__,
self.parser_name,
self.query_string,
self.kwargs,
)
def prepare(self, query_obj):
if not hasattr(query_obj, "build_alt_parser_query"):
warnings.warn(
"Use of 'AltParser' input type is being ignored, as the '%s' backend doesn't support them."
% query_obj
)
return ""
return query_obj.build_alt_parser_query(
self.parser_name, self.query_string, **self.kwargs
)
| AltParser |
python | uqfoundation__dill | dill/tests/test_sources.py | {
"start": 666,
"end": 8672
} | class ____:
pass
_bar = Bar()
# repeat, but from test_source.py
import test_source as ts
# test objects created in other test modules
import test_mixins as tm
import dill.source as ds
def test_isfrommain():
assert ds.isfrommain(add) == True
assert ds.isfrommain(squared) == True
assert ds.isfrommain(Bar) == True
assert ds.isfrommain(_bar) == True
assert ds.isfrommain(ts.add) == False
assert ds.isfrommain(ts.squared) == False
assert ds.isfrommain(ts.Bar) == False
assert ds.isfrommain(ts._bar) == False
assert ds.isfrommain(tm.quad) == False
assert ds.isfrommain(tm.double_add) == False
assert ds.isfrommain(tm.quadratic) == False
assert ds.isdynamic(add) == False
assert ds.isdynamic(squared) == False
assert ds.isdynamic(ts.add) == False
assert ds.isdynamic(ts.squared) == False
assert ds.isdynamic(tm.double_add) == False
assert ds.isdynamic(tm.quadratic) == False
def test_matchlambda():
assert ds._matchlambda(f, 'f = lambda x: x**2\n')
assert ds._matchlambda(squared, 'squared = lambda x:x**2\n')
assert ds._matchlambda(ts.f, 'f = lambda x: x**2\n')
assert ds._matchlambda(ts.squared, 'squared = lambda x:x**2\n')
def test_findsource():
lines, lineno = ds.findsource(add)
assert lines[lineno] == 'def add(x,y):\n'
lines, lineno = ds.findsource(ts.add)
assert lines[lineno] == 'def add(x,y):\n'
lines, lineno = ds.findsource(squared)
assert lines[lineno] == 'squared = lambda x:x**2\n'
lines, lineno = ds.findsource(ts.squared)
assert lines[lineno] == 'squared = lambda x:x**2\n'
lines, lineno = ds.findsource(Bar)
assert lines[lineno] == 'class Bar:\n'
lines, lineno = ds.findsource(ts.Bar)
assert lines[lineno] == 'class Bar:\n'
lines, lineno = ds.findsource(_bar)
assert lines[lineno] == 'class Bar:\n'
lines, lineno = ds.findsource(ts._bar)
assert lines[lineno] == 'class Bar:\n'
lines, lineno = ds.findsource(tm.quad)
assert lines[lineno] == 'def quad(a=1, b=1, c=0):\n'
lines, lineno = ds.findsource(tm.double_add)
assert lines[lineno] == ' def func(*args, **kwds):\n'
lines, lineno = ds.findsource(tm.quadratic)
assert lines[lineno] == ' def dec(f):\n'
def test_getsourcelines():
assert ''.join(ds.getsourcelines(add)[0]) == 'def add(x,y):\n return x+y\n'
assert ''.join(ds.getsourcelines(ts.add)[0]) == 'def add(x,y):\n return x+y\n'
assert ''.join(ds.getsourcelines(squared)[0]) == 'squared = lambda x:x**2\n'
assert ''.join(ds.getsourcelines(ts.squared)[0]) == 'squared = lambda x:x**2\n'
assert ''.join(ds.getsourcelines(Bar)[0]) == 'class Bar:\n pass\n'
assert ''.join(ds.getsourcelines(ts.Bar)[0]) == 'class Bar:\n pass\n'
assert ''.join(ds.getsourcelines(_bar)[0]) == 'class Bar:\n pass\n' #XXX: ?
assert ''.join(ds.getsourcelines(ts._bar)[0]) == 'class Bar:\n pass\n' #XXX: ?
assert ''.join(ds.getsourcelines(tm.quad)[0]) == 'def quad(a=1, b=1, c=0):\n inverted = [False]\n def invert():\n inverted[0] = not inverted[0]\n def dec(f):\n def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n func.__wrapped__ = f\n func.invert = invert\n func.inverted = inverted\n return func\n return dec\n'
assert ''.join(ds.getsourcelines(tm.quadratic)[0]) == ' def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n'
assert ''.join(ds.getsourcelines(tm.quadratic, lstrip=True)[0]) == 'def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n'
assert ''.join(ds.getsourcelines(tm.quadratic, enclosing=True)[0]) == 'def quad_factory(a=1,b=1,c=0):\n def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n return dec\n'
assert ''.join(ds.getsourcelines(tm.double_add)[0]) == ' def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n'
assert ''.join(ds.getsourcelines(tm.double_add, enclosing=True)[0]) == 'def quad(a=1, b=1, c=0):\n inverted = [False]\n def invert():\n inverted[0] = not inverted[0]\n def dec(f):\n def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n func.__wrapped__ = f\n func.invert = invert\n func.inverted = inverted\n return func\n return dec\n'
def test_indent():
assert ds.outdent(''.join(ds.getsourcelines(tm.quadratic)[0])) == ''.join(ds.getsourcelines(tm.quadratic, lstrip=True)[0])
assert ds.indent(''.join(ds.getsourcelines(tm.quadratic, lstrip=True)[0]), 2) == ''.join(ds.getsourcelines(tm.quadratic)[0])
def test_dumpsource():
local = {}
exec(ds.dumpsource(add, alias='raw'), {}, local)
exec(ds.dumpsource(ts.add, alias='mod'), {}, local)
assert local['raw'](1,2) == local['mod'](1,2)
exec(ds.dumpsource(squared, alias='raw'), {}, local)
exec(ds.dumpsource(ts.squared, alias='mod'), {}, local)
assert local['raw'](3) == local['mod'](3)
assert ds._wrap(add)(1,2) == ds._wrap(ts.add)(1,2)
assert ds._wrap(squared)(3) == ds._wrap(ts.squared)(3)
def test_name():
assert ds._namespace(add) == ds.getname(add, fqn=True).split('.')
assert ds._namespace(ts.add) == ds.getname(ts.add, fqn=True).split('.')
assert ds._namespace(squared) == ds.getname(squared, fqn=True).split('.')
assert ds._namespace(ts.squared) == ds.getname(ts.squared, fqn=True).split('.')
assert ds._namespace(Bar) == ds.getname(Bar, fqn=True).split('.')
assert ds._namespace(ts.Bar) == ds.getname(ts.Bar, fqn=True).split('.')
assert ds._namespace(tm.quad) == ds.getname(tm.quad, fqn=True).split('.')
#XXX: the following also works, however behavior may be wrong for nested functions
#assert ds._namespace(tm.double_add) == ds.getname(tm.double_add, fqn=True).split('.')
#assert ds._namespace(tm.quadratic) == ds.getname(tm.quadratic, fqn=True).split('.')
assert ds.getname(add) == 'add'
assert ds.getname(ts.add) == 'add'
assert ds.getname(squared) == 'squared'
assert ds.getname(ts.squared) == 'squared'
assert ds.getname(Bar) == 'Bar'
assert ds.getname(ts.Bar) == 'Bar'
assert ds.getname(tm.quad) == 'quad'
assert ds.getname(tm.double_add) == 'func' #XXX: ?
assert ds.getname(tm.quadratic) == 'dec' #XXX: ?
def test_getimport():
local = {}
exec(ds.getimport(add, alias='raw'), {}, local)
exec(ds.getimport(ts.add, alias='mod'), {}, local)
assert local['raw'](1,2) == local['mod'](1,2)
exec(ds.getimport(squared, alias='raw'), {}, local)
exec(ds.getimport(ts.squared, alias='mod'), {}, local)
assert local['raw'](3) == local['mod'](3)
exec(ds.getimport(Bar, alias='raw'), {}, local)
exec(ds.getimport(ts.Bar, alias='mod'), {}, local)
assert ds.getname(local['raw']) == ds.getname(local['mod'])
exec(ds.getimport(tm.quad, alias='mod'), {}, local)
assert local['mod']()(sum)([1,2,3]) == tm.quad()(sum)([1,2,3])
#FIXME: wrong results for nested functions (e.g. tm.double_add, tm.quadratic)
def test_importable():
assert ds.importable(add, source=False) == ds.getimport(add)
assert ds.importable(add) == ds.getsource(add)
assert ds.importable(squared, source=False) == ds.getimport(squared)
assert ds.importable(squared) == ds.getsource(squared)
assert ds.importable(Bar, source=False) == ds.getimport(Bar)
assert ds.importable(Bar) == ds.getsource(Bar)
assert ds.importable(ts.add) == ds.getimport(ts.add)
assert ds.importable(ts.add, source=True) == ds.getsource(ts.add)
assert ds.importable(ts.squared) == ds.getimport(ts.squared)
assert ds.importable(ts.squared, source=True) == ds.getsource(ts.squared)
assert ds.importable(ts.Bar) == ds.getimport(ts.Bar)
assert ds.importable(ts.Bar, source=True) == ds.getsource(ts.Bar)
if __name__ == '__main__':
test_isfrommain()
test_matchlambda()
test_findsource()
test_getsourcelines()
test_indent()
test_dumpsource()
test_name()
test_getimport()
test_importable()
| Bar |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py | {
"start": 14983,
"end": 15895
} | class ____(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnTriL()
@test_util.run_deprecated_v1
def test_diag_plus_tril(self):
diag = linalg.LinearOperatorDiag([1., 2.])
tril = linalg.LinearOperatorLowerTriangular([[10., 0.], [30., 0.]])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(diag, diag))
self.assertTrue(self._adder.can_add(diag, tril))
operator = self._adder.add(diag, tril, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorLowerTriangular)
with self.cached_session():
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
| AddAndReturnTriLTest |
python | huggingface__transformers | src/transformers/models/ctrl/modeling_ctrl.py | {
"start": 4376,
"end": 5837
} | class ____(nn.Module):
def __init__(self, d_model_size, num_heads, dff, rate=0.1, layer_idx=None):
super().__init__()
self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, layer_idx=layer_idx)
self.ffn = point_wise_feed_forward_network(d_model_size, dff)
self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-6)
self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-6)
self.dropout1 = nn.Dropout(rate)
self.dropout2 = nn.Dropout(rate)
def forward(
self,
x,
mask,
layer_past=None,
attention_mask=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
normed = self.layernorm1(x)
attn_outputs = self.multi_head_attention(
normed,
normed,
normed,
mask,
layer_past=layer_past,
attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
attn_output = attn_outputs[0]
attn_output = self.dropout1(attn_output)
out1 = x + attn_output
out2 = self.layernorm2(out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout2(ffn_output)
out2 = out1 + ffn_output
outputs = (out2,) + attn_outputs[1:]
return outputs
@auto_docstring
| EncoderLayer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 364448,
"end": 365146
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count", "total_size")
edges = sgqlc.types.Field(sgqlc.types.list_of("LanguageEdge"), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("Language"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
total_size = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalSize")
| LanguageConnection |
python | Pylons__pyramid | docs/quick_tutorial/databases/tutorial/tests.py | {
"start": 946,
"end": 1474
} | class ____(unittest.TestCase):
def setUp(self):
from pyramid.paster import get_app
app = get_app('development.ini')
from webtest import TestApp
self.testapp = TestApp(app)
def tearDown(self):
from .models import DBSession
DBSession.remove()
def test_it(self):
res = self.testapp.get('/', status=200)
self.assertIn(b'Wiki: View', res.body)
res = self.testapp.get('/add', status=200)
self.assertIn(b'Add/Edit', res.body)
| WikiFunctionalTests |
python | coleifer__peewee | peewee.py | {
"start": 15013,
"end": 16120
} | class ____(collections.namedtuple('_State', ('scope', 'parentheses',
'settings'))):
def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, **kwargs):
return super(State, cls).__new__(cls, scope, parentheses, kwargs)
def __call__(self, scope=None, parentheses=None, **kwargs):
# Scope and settings are "inherited" (parentheses is not, however).
scope = self.scope if scope is None else scope
# Try to avoid unnecessary dict copying.
if kwargs and self.settings:
settings = self.settings.copy() # Copy original settings dict.
settings.update(kwargs) # Update copy with overrides.
elif kwargs:
settings = kwargs
else:
settings = self.settings
return State(scope, parentheses, **settings)
def __getattr__(self, attr_name):
return self.settings.get(attr_name)
def __scope_context__(scope):
@contextmanager
def inner(self, **kwargs):
with self(scope=scope, **kwargs):
yield self
return inner
| State |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/overrides.py | {
"start": 262,
"end": 502
} | class ____:
q: str = "q"
r: str = "r"
def __init__(self, arg):
self.r = arg
def methodA(self, arg):
pass
def methodB(self):
pass
@classmethod
def classMethod(cls, arg):
pass
| Base |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 14272,
"end": 14415
} | class ____(models.Model):
name = models.CharField(max_length=255)
children = models.ManyToManyField(ClassB, blank=True, null=True)
| ClassA |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_equals.py | {
"start": 105,
"end": 2947
} | class ____:
def test_dataframe_not_equal(self):
# see GH#28839
df1 = DataFrame({"a": [1, 2], "b": ["s", "d"]})
df2 = DataFrame({"a": ["s", "d"], "b": [1, 2]})
assert df1.equals(df2) is False
def test_equals_different_blocks(self, using_infer_string):
# GH#9330
df0 = DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
if not using_infer_string:
# this assert verifies that the above operations have
# induced a block rearrangement
assert df0._mgr.blocks[0].dtype != df1._mgr.blocks[0].dtype
# do the real tests
tm.assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_equals(self):
# Add object dtype column with nans
index = np.random.default_rng(2).random(10)
df1 = DataFrame(
np.random.default_rng(2).random(10), index=index, columns=["floats"]
)
df1["text"] = "the sky is so blue. we could use more chocolate.".split()
df1["start"] = date_range("2000-1-1", periods=10, freq="min")
df1["end"] = date_range("2000-1-1", periods=10, freq="D")
df1["diff"] = df1["end"] - df1["start"]
# Explicitly cast to object, to avoid implicit cast when setting np.nan
df1["bool"] = (np.arange(10) % 3 == 0).astype(object)
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1["text"].equals(df2["text"])
assert df1["start"].equals(df2["start"])
assert df1["end"].equals(df2["end"])
assert df1["diff"].equals(df2["diff"])
assert df1["bool"].equals(df2["bool"])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different["floats"] = different["floats"].astype("float32")
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = date_range("2000-1-1", periods=10, freq="min")
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(["text"], append=True)
df2 = df1.set_index(["text"], append=True)
assert df3.equals(df2)
df2 = df1.set_index(["floats"], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(["floats"], append=True)
df2 = df1.set_index(["floats"], append=True)
assert df3.equals(df2)
| TestEquals |
python | getsentry__sentry | tests/sentry/search/eap/test_spans.py | {
"start": 25769,
"end": 33556
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project(name="test")
self.resolver = SearchResolver(
params=SnubaParams(projects=[self.project]),
config=SearchResolverConfig(),
definitions=SPAN_DEFINITIONS,
)
def test_simple_op_field(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("span.op")
assert resolved_column.proto_definition == AttributeKey(
name="sentry.op", type=AttributeKey.Type.TYPE_STRING
)
assert virtual_context is None
def test_project_field(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("project")
assert resolved_column.proto_definition == AttributeKey(
name="project", type=AttributeKey.Type.TYPE_STRING
)
assert virtual_context is not None
assert virtual_context.constructor(self.resolver.params) == VirtualColumnContext(
from_column_name="sentry.project_id",
to_column_name="project",
value_map={str(self.project.id): self.project.slug},
)
def test_project_slug_field(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("project.slug")
assert resolved_column.proto_definition == AttributeKey(
name="project.slug", type=AttributeKey.Type.TYPE_STRING
)
assert virtual_context is not None
assert virtual_context.constructor(self.resolver.params) == VirtualColumnContext(
from_column_name="sentry.project_id",
to_column_name="project.slug",
value_map={str(self.project.id): self.project.slug},
)
def test_simple_tag(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("tags[foo]")
assert resolved_column.proto_definition == AttributeKey(
name="foo", type=AttributeKey.Type.TYPE_STRING
)
assert virtual_context is None
def test_simple_string_tag(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("tags[foo, string]")
assert resolved_column.proto_definition == AttributeKey(
name="foo", type=AttributeKey.Type.TYPE_STRING
)
assert virtual_context is None
def test_simple_number_tag(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("tags[foo, number]")
assert resolved_column.proto_definition == AttributeKey(
name="foo", type=AttributeKey.Type.TYPE_DOUBLE
)
assert virtual_context is None
def test_sum_function(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("sum(span.self_time)")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_SUM,
key=AttributeKey(name="sentry.exclusive_time_ms", type=AttributeKey.Type.TYPE_DOUBLE),
label="sum(span.self_time)",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
def test_sum_default_argument(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("sum()")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_SUM,
key=AttributeKey(name="sentry.duration_ms", type=AttributeKey.Type.TYPE_DOUBLE),
label="sum()",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
def test_function_alias(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("sum() as test")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_SUM,
key=AttributeKey(name="sentry.duration_ms", type=AttributeKey.Type.TYPE_DOUBLE),
label="test",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
def test_count(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("count()")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_COUNT,
key=AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT),
label="count()",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
resolved_column, virtual_context = self.resolver.resolve_column("count(span.duration)")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_COUNT,
key=AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT),
label="count(span.duration)",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
def test_p50(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("p50()")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_P50,
key=AttributeKey(name="sentry.duration_ms", type=AttributeKey.Type.TYPE_DOUBLE),
label="p50()",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
def test_count_unique(self) -> None:
resolved_column, virtual_context = self.resolver.resolve_column("count_unique(span.action)")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_UNIQ,
key=AttributeKey(name="sentry.action", type=AttributeKey.Type.TYPE_STRING),
label="count_unique(span.action)",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
def test_resolver_cache_attribute(self) -> None:
self.resolver.resolve_columns(["span.op"])
assert "span.op" in self.resolver._resolved_attribute_cache
project_column, project_context = self.resolver.resolve_column("project")
# Override the cache so we can confirm its being used
self.resolver._resolved_attribute_cache["span.op"] = project_column, project_context # type: ignore[assignment]
# If we resolve op again, we should get the project context and column instead
resolved_column, virtual_context = self.resolver.resolve_column("span.op")
assert (resolved_column, virtual_context) == (project_column, project_context)
def test_resolver_cache_function(self) -> None:
self.resolver.resolve_columns(["count()"])
assert "count()" in self.resolver._resolved_function_cache
p95_column, p95_context = self.resolver.resolve_column("p95(span.duration) as foo")
self.resolver._resolved_function_cache["count()"] = p95_column, p95_context # type: ignore[assignment]
resolved_column, virtual_context = self.resolver.resolve_column("count()")
assert (resolved_column, virtual_context) == (p95_column, p95_context)
def test_loads_deprecated_attrs_json() -> None:
with open(os.path.join(SENTRY_CONVENTIONS_DIRECTORY, "deprecated_attributes.json"), "rb") as f:
deprecated_attrs = json.loads(f.read())["attributes"]
attribute = deprecated_attrs[0]
assert attribute["key"]
assert attribute["deprecation"]
| SearchResolverColumnTest |
python | getsentry__sentry | src/sentry/organizations/services/organization/model.py | {
"start": 11345,
"end": 12700
} | class ____(RpcModel):
"""
This object wraps an organization result inside of its membership context in terms of an (optional) user id.
This is due to the large number of callsites that require an organization and a user's membership at the
same time and in a consistency state. This object allows a nice envelop for both of these ideas from a single
transactional query. Used by access, determine_active_organization, and others.
"""
# user_id is None iff the get_organization_by_id call is not provided a user_id context.
user_id: int | None = None
# The organization is always non-null because the null wrapping is around this object instead.
# A None organization => a None RpcUserOrganizationContext
organization: RpcOrganization = Field(default_factory=lambda: RpcOrganization())
# member can be None when the given user_id does not have membership with the given organization.
# Note that all related fields of this organization member are filtered by visibility and is_active=True.
member: RpcOrganizationMember | None = None
def __post_init__(self) -> None:
# Ensures that outer user_id always agrees with the inner member object.
if self.user_id is not None and self.member is not None:
assert self.user_id == self.member.user_id
| RpcUserOrganizationContext |
python | protocolbuffers__protobuf | python/google/protobuf/descriptor_database.py | {
"start": 565,
"end": 5936
} | class ____(object):
"""A container accepting FileDescriptorProtos and maps DescriptorProtos."""
def __init__(self):
self._file_desc_protos_by_file = {}
self._file_desc_protos_by_symbol = {}
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
existing proto in the database.
"""
proto_name = file_desc_proto.name
if proto_name not in self._file_desc_protos_by_file:
self._file_desc_protos_by_file[proto_name] = file_desc_proto
elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:
raise DescriptorDatabaseConflictingDefinitionError(
'%s already added, but with different descriptor.' % proto_name)
else:
return
# Add all the top-level descriptors to the index.
package = file_desc_proto.package
for message in file_desc_proto.message_type:
for name in _ExtractSymbols(message, package):
self._AddSymbol(name, file_desc_proto)
for enum in file_desc_proto.enum_type:
self._AddSymbol(
('.'.join((package, enum.name)) if package else enum.name),
file_desc_proto,
)
for enum_value in enum.value:
self._file_desc_protos_by_symbol[
'.'.join((package, enum_value.name)) if package else enum_value.name
] = file_desc_proto
for extension in file_desc_proto.extension:
self._AddSymbol(
('.'.join((package, extension.name)) if package else extension.name),
file_desc_proto,
)
for service in file_desc_proto.service:
self._AddSymbol(
('.'.join((package, service.name)) if package else service.name),
file_desc_proto,
)
def FindFileByName(self, name):
"""Finds the file descriptor proto by file name.
Typically the file name is a relative path ending to a .proto file. The
proto with the given name will have to have been added to this database
using the Add method or else an error will be raised.
Args:
name: The file name to find.
Returns:
The file descriptor proto matching the name.
Raises:
KeyError if no file by the given name was added.
"""
return self._file_desc_protos_by_file[name]
def FindFileContainingSymbol(self, symbol):
"""Finds the file descriptor proto containing the specified symbol.
The symbol should be a fully qualified name including the file descriptor's
package and any containing messages. Some examples:
'some.package.name.Message'
'some.package.name.Message.NestedEnum'
'some.package.name.Message.some_field'
The file descriptor proto containing the specified symbol must be added to
this database using the Add method or else an error will be raised.
Args:
symbol: The fully qualified symbol name.
Returns:
The file descriptor proto containing the symbol.
Raises:
KeyError if no file contains the specified symbol.
"""
if symbol.count('.') == 1 and symbol[0] == '.':
symbol = symbol.lstrip('.')
warnings.warn(
'Please remove the leading "." when '
'FindFileContainingSymbol, this will turn to error '
'in 2026 Jan.',
RuntimeWarning,
)
try:
return self._file_desc_protos_by_symbol[symbol]
except KeyError:
# Fields, enum values, and nested extensions are not in
# _file_desc_protos_by_symbol. Try to find the top level
# descriptor. Non-existent nested symbol under a valid top level
# descriptor can also be found. The behavior is the same with
# protobuf C++.
top_level, _, _ = symbol.rpartition('.')
try:
return self._file_desc_protos_by_symbol[top_level]
except KeyError:
# Raise the original symbol as a KeyError for better diagnostics.
raise KeyError(symbol)
def FindFileContainingExtension(self, extendee_name, extension_number):
# TODO: implement this API.
return None
def FindAllExtensionNumbers(self, extendee_name):
# TODO: implement this API.
return []
def _AddSymbol(self, name, file_desc_proto):
if name in self._file_desc_protos_by_symbol:
warn_msg = ('Conflict register for file "' + file_desc_proto.name +
'": ' + name +
' is already defined in file "' +
self._file_desc_protos_by_symbol[name].name + '"')
warnings.warn(warn_msg, RuntimeWarning)
self._file_desc_protos_by_symbol[name] = file_desc_proto
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = package + '.' + desc_proto.name if package else desc_proto.name
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name))
| DescriptorDatabase |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-upside-down.py | {
"start": 584,
"end": 1079
} | class ____(object):
# @param root, a tree node
# @return root of the upside down tree
def upsideDownBinaryTree(self, root):
return self.upsideDownBinaryTreeRecu(root, None)
def upsideDownBinaryTreeRecu(self, p, parent):
if p is None:
return parent
root = self.upsideDownBinaryTreeRecu(p.left, p)
if parent:
p.left = parent.right
else:
p.left = None
p.right = parent
return root
| Solution2 |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_model_checkpoint_step_interval_val_metric.py | {
"start": 562,
"end": 3590
} | class ____(LightningModule):
def __init__(self, val_scores: list[float]):
super().__init__()
self.layer = nn.Linear(1, 1)
self._val_scores = [float(s) for s in val_scores]
# LightningModule API (minimal)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.layer(x)
loss = F.mse_loss(y_hat, y)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
# do nothing per-step; we log at epoch end
pass
def on_validation_epoch_end(self):
# Log a validation metric only at validation epoch end
# Values increase across epochs; best should be the last epoch
score = self._val_scores[self.current_epoch]
# use logger=True so it lands in trainer.callback_metrics
self.log("auroc", torch.tensor(score, dtype=torch.float32), prog_bar=False, logger=True)
def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=0.01)
@pytest.mark.parametrize("val_scores", [[0.1, 0.5, 1.0]])
def test_model_checkpoint_every_n_train_steps_with_val_metric_saves_after_val(tmp_path, val_scores):
"""Reproduces #20919: Using every_n_train_steps with a validation-only metric should save the best checkpoint only
after the metric is computed at validation, not earlier at the train-step boundary.
Expectation: best_model_score equals the last (max) val score.
"""
seed_everything(123)
# 2 train batches per epoch (so checkpoint triggers at the epoch boundary)
ds = TinyDataset(n=4)
train_loader = DataLoader(ds, batch_size=2, shuffle=False)
val_loader = DataLoader(ds, batch_size=2, shuffle=False)
model = ValMetricModule(val_scores=val_scores)
ckpt = ModelCheckpoint(
dirpath=tmp_path,
monitor="auroc",
mode="max",
save_top_k=1,
# critical: trigger on train steps, not on epoch end
every_n_train_steps=2, # equal to number of train batches per epoch
train_time_interval=None,
every_n_epochs=0,
save_on_train_epoch_end=False,
save_weights_only=True,
)
trainer = Trainer(
max_epochs=len(val_scores),
accelerator="cpu",
devices=1,
callbacks=[ckpt],
num_sanity_val_steps=0,
log_every_n_steps=1,
limit_train_batches=2,
limit_val_batches=1,
enable_checkpointing=True,
enable_model_summary=False,
logger=False,
)
trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
assert ckpt.best_model_score is not None
# Should equal the last (max) validation score
expected = max(val_scores)
actual = float(ckpt.best_model_score)
assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6), (
f"best_model_score should be {expected} (last/maximum val score), got {actual}.\n"
f"This indicates the checkpoint was saved before the validation metric was computed."
)
| ValMetricModule |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/aggregate.py | {
"start": 3807,
"end": 5585
} | class ____(GenericBase):
@staticmethod
def visit_eq(expression: Expression, value: str) -> Condition:
if value == "":
return does_not_contain(_nonempty_str(expression))
return contains(StringScalar.visit_eq(expression, value))
@staticmethod
def visit_neq(expression: Expression, value: str) -> Condition:
if value == "":
return contains(_nonempty_str(expression))
return does_not_contain(StringScalar.visit_eq(expression, value))
@staticmethod
def visit_match(expression: Expression, value: str) -> Condition:
# Assumes this is only called on wildcard strings, so `value` is non-empty.
return contains(StringScalar.visit_match(expression, value))
@staticmethod
def visit_not_match(expression: Expression, value: str) -> Condition:
# Assumes this is only called on wildcard strings, so `value` is non-empty.
return does_not_contain(StringScalar.visit_match(expression, value))
@staticmethod
def visit_in(expression: Expression, value_list: list[str]) -> Condition:
nonempty_case = contains(
StringScalar.visit_in(expression, [v for v in value_list if v != ""])
)
if "" in value_list:
return Or(conditions=[SumOfStringScalar.visit_eq(expression, ""), nonempty_case])
return nonempty_case
@staticmethod
def visit_not_in(expression: Expression, value_list: list[str]) -> Condition:
nonempty_case = does_not_contain(
StringScalar.visit_in(expression, [v for v in value_list if v != ""])
)
if "" in value_list:
return And(conditions=[SumOfStringScalar.visit_neq(expression, ""), nonempty_case])
return nonempty_case
| SumOfStringScalar |
python | PyCQA__pylint | tests/message/unittest_message_definition.py | {
"start": 1651,
"end": 5271
} | class ____:
@staticmethod
def assert_with_fail_msg(
msg: MessageDefinition,
expected: bool = True,
py_version: tuple[int, ...] | sys._version_info = sys.version_info,
) -> None:
fail_msg = (
f"With minversion='{msg.minversion}' and maxversion='{msg.maxversion}',"
f" and the py-version option being {py_version} "
"the message should{}be emitable"
)
if expected:
assert msg.may_be_emitted(py_version), fail_msg.format(" ")
else:
assert not msg.may_be_emitted(py_version), fail_msg.format(" not ")
@staticmethod
def get_message_definition() -> MessageDefinition:
return MessageDefinition(
FalseChecker(),
"W1234",
"message",
"description",
"msg-symbol",
WarningScope.NODE,
)
def test_may_be_emitted_default(self) -> None:
major = sys.version_info.major
minor = sys.version_info.minor
msg = self.get_message_definition()
self.assert_with_fail_msg(msg, expected=True)
msg.minversion = (major, minor - 1)
msg.maxversion = (major, minor + 1)
self.assert_with_fail_msg(msg, expected=True)
msg.minversion = (major, minor + 1)
self.assert_with_fail_msg(msg, expected=False)
msg.minversion = (major, minor - 1)
self.assert_with_fail_msg(msg, expected=True)
msg.maxversion = (major, minor - 1)
self.assert_with_fail_msg(msg, expected=False)
def test_may_be_emitted_py_version(self) -> None:
msg = self.get_message_definition()
self.assert_with_fail_msg(msg, expected=True, py_version=(3, 2))
msg.maxversion = (3, 5)
self.assert_with_fail_msg(msg, expected=True, py_version=(3, 2))
self.assert_with_fail_msg(msg, expected=False, py_version=(3, 5))
self.assert_with_fail_msg(msg, expected=False, py_version=(3, 6))
msg.maxversion = None
msg.minversion = (3, 9)
self.assert_with_fail_msg(msg, expected=True, py_version=(3, 9))
self.assert_with_fail_msg(msg, expected=True, py_version=(3, 10))
self.assert_with_fail_msg(msg, expected=False, py_version=(3, 8))
def test_repr(self) -> None:
msg = self.get_message_definition()
repr_str = str([msg, msg])
assert "W1234" in repr_str
assert "msg-symbol" in repr_str
expected = "[MessageDefinition:msg-symbol-one (W1234), MessageDefinition:msg-symbol-two (W1235)]"
assert str(FalseChecker().messages) == expected
def test_str(self) -> None:
msg = self.get_message_definition()
str_msg = str(msg)
assert "W1234" in str_msg
assert "msg-symbol" in str_msg
expected = """MessageDefinition:msg-symbol-one (W1234):
message one msg description"""
assert str(FalseChecker().messages[0]) == expected
def test_format_help(self) -> None:
msg = self.get_message_definition()
major = sys.version_info.major
minor = sys.version_info.minor
msg.minversion = (major, minor - 1)
msg.maxversion = (major, minor + 1)
format_str_checker_ref = msg.format_help(checkerref=False)
format_str = msg.format_help(checkerref=True)
assert str(minor - 1) in format_str
assert str(major + 1) in format_str_checker_ref
expected_format_help = """:msg-symbol-one (W1234): *message one*
msg description"""
assert FalseChecker().messages[0].format_help() == expected_format_help
| TestMessagesDefinition |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 129356,
"end": 129962
} | class ____(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(NotImplementedError, self._error()):
test_ops.old()
| DeprecatedTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass1.py | {
"start": 7834,
"end": 8398
} | class ____:
val1: int
val2: str
val3: float
def func5(subj: object):
match subj:
case Dataclass1(a, b):
reveal_type(a, expected_text="int")
reveal_type(b, expected_text="complex")
reveal_type(subj, expected_text="Dataclass1")
case Dataclass2(a, b, c):
reveal_type(a, expected_text="int")
reveal_type(b, expected_text="str")
reveal_type(c, expected_text="float")
reveal_type(subj, expected_text="Dataclass2")
@dataclass(match_args=False)
| Dataclass2 |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_structured.py | {
"start": 249,
"end": 3937
} | class ____(BaseTestZDType):
test_cls = Structured
valid_dtype = (
np.dtype([("field1", np.int32), ("field2", np.float64)]),
np.dtype([("field1", np.int64), ("field2", np.int32)]),
)
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.float64),
np.dtype("|S10"),
)
valid_json_v2 = (
{"name": [["field1", ">i4"], ["field2", ">f8"]], "object_codec_id": None},
{"name": [["field1", ">i8"], ["field2", ">i4"]], "object_codec_id": None},
)
valid_json_v3 = (
{
"name": "structured",
"configuration": {
"fields": [
["field1", "int32"],
["field2", "float64"],
]
},
},
{
"name": "structured",
"configuration": {
"fields": [
[
"field1",
{
"name": "numpy.datetime64",
"configuration": {"unit": "s", "scale_factor": 1},
},
],
[
"field2",
{"name": "fixed_length_utf32", "configuration": {"length_bytes": 32}},
],
]
},
},
)
invalid_json_v2 = (
[("field1", "|i1"), ("field2", "|f8")],
[("field1", "|S10"), ("field2", "|f8")],
)
invalid_json_v3 = (
{
"name": "structured",
"configuration": {
"fields": [
("field1", {"name": "int32", "configuration": {"endianness": "invalid"}}),
("field2", {"name": "float64", "configuration": {"endianness": "big"}}),
]
},
},
{"name": "invalid_name"},
)
scalar_v2_params = (
(Structured(fields=(("field1", Int32()), ("field2", Float64()))), "AQAAAAAAAAAAAPA/"),
(Structured(fields=(("field1", Float16()), ("field2", Int32()))), "AQAAAAAA"),
)
scalar_v3_params = (
(Structured(fields=(("field1", Int32()), ("field2", Float64()))), "AQAAAAAAAAAAAPA/"),
(Structured(fields=(("field1", Int64()), ("field2", Int32()))), "AQAAAAAAAAAAAPA/"),
)
cast_value_params = (
(
Structured(fields=(("field1", Int32()), ("field2", Float64()))),
(1, 2.0),
np.array((1, 2.0), dtype=[("field1", np.int32), ("field2", np.float64)]),
),
(
Structured(fields=(("field1", Int64()), ("field2", Int32()))),
(3, 4.5),
np.array((3, 4.5), dtype=[("field1", np.int64), ("field2", np.int32)]),
),
)
item_size_params = (
Structured(fields=(("field1", Int32()), ("field2", Float64()))),
Structured(fields=(("field1", Int64()), ("field2", Int32()))),
)
invalid_scalar_params = (
(Structured(fields=(("field1", Int32()), ("field2", Float64()))), "i am a string"),
(Structured(fields=(("field1", Int32()), ("field2", Float64()))), {"type": "dict"}),
)
def scalar_equals(self, scalar1: Any, scalar2: Any) -> bool:
if hasattr(scalar1, "shape") and hasattr(scalar2, "shape"):
return np.array_equal(scalar1, scalar2)
return super().scalar_equals(scalar1, scalar2)
def test_invalid_size() -> None:
"""
Test that it's impossible to create a data type that has no fields
"""
fields = ()
msg = f"must have at least one field. Got {fields!r}"
with pytest.raises(ValueError, match=msg):
Structured(fields=fields)
| TestStructured |
python | django-haystack__django-haystack | haystack/backends/elasticsearch7_backend.py | {
"start": 20322,
"end": 20448
} | class ____(BaseEngine):
backend = Elasticsearch7SearchBackend
query = Elasticsearch7SearchQuery
| Elasticsearch7SearchEngine |
python | realpython__materials | gemini-cli/todolist/src/todolist/database.py | {
"start": 474,
"end": 980
} | class ____(Model):
emoji = TextField(null=True)
name = TextField(null=False)
done = BooleanField(null=False, default=False)
task_list = ForeignKeyField(TaskList, backref="tasks", on_delete="CASCADE")
class Meta:
database = db
table_name = "tasks"
@cached_property
def pretty_name(self) -> str:
if self.emoji:
return f"{self.emoji} {self.name}"
else:
return str(self.name)
db.create_tables([TaskList, Task], safe=True)
| Task |
python | mlflow__mlflow | tests/sagemaker/mock/__init__.py | {
"start": 26987,
"end": 27264
} | class ____(BaseModel):
TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
def __init__(self):
curr_time = datetime.now().strftime(TimestampedResource.TIMESTAMP_FORMAT)
self.creation_time = curr_time
self.last_modified_time = curr_time
| TimestampedResource |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_requests/sponsored_products_report_request_builder.py | {
"start": 280,
"end": 6536
} | class ____(AmazonAdsBaseRequestBuilder):
@classmethod
def _init_report_endpoint(
cls,
client_id: str,
client_access_token: str,
profile_id: str,
report_type: str,
metrics: List[str],
report_date: Optional[str] = None,
) -> "SponsoredProductsReportRequestBuilder":
return (
cls(f"reporting/reports")
.with_client_id(client_id)
.with_client_access_token(client_access_token)
.with_profile_id(profile_id)
.with_metrics(metrics)
.with_report_date(report_date)
.with_report_type(report_type)
)
@classmethod
def init_campaigns_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "campaigns", report_date, metrics)
@classmethod
def init_ad_groups_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "adGroups", report_date, metrics)
@classmethod
def init_keywords_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "keywords", report_date, metrics)
@classmethod
def init_targets_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "targets", report_date, metrics)
@classmethod
def init_product_ads_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "productAds", report_date, metrics)
@classmethod
def init_asins_keywords_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "asins_keywords", report_date, metrics)
@classmethod
def init_asins_targets_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredProductsReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "asins_targets", report_date, metrics)
def __init__(self, resource: str) -> None:
super().__init__(resource)
self._metrics: List[str] = None
self._report_date: str = None
self._report_type: str = None
@property
def _report_config_group_by(self) -> List[str]:
return {
"campaigns": ["campaign"],
"adGroups": ["campaign", "adGroup"],
"keywords": ["targeting"],
"targets": ["targeting"],
"productAds": ["advertiser"],
"asins_keywords": ["asin"],
"asins_targets": ["asin"],
}[self._report_type]
@property
def _report_config_report_type_id(self) -> str:
return {
"campaigns": "spCampaigns",
"adGroups": "spCampaigns",
"keywords": "spTargeting",
"targets": "spTargeting",
"productAds": "spAdvertisedProduct",
"asins_keywords": "spPurchasedProduct",
"asins_targets": "spPurchasedProduct",
}[self._report_type]
@property
def _report_config_filters(self) -> List[str]:
return {
"campaigns": [],
"adGroups": [],
"keywords": [{"field": "keywordType", "values": ["BROAD", "PHRASE", "EXACT"]}],
"targets": [{"field": "keywordType", "values": ["TARGETING_EXPRESSION", "TARGETING_EXPRESSION_PREDEFINED"]}],
"productAds": [],
"asins_keywords": [],
"asins_targets": [],
}[self._report_type]
@property
def query_params(self) -> Dict[str, Any]:
return None
@property
def request_body(self) -> Optional[str]:
body: dict = OrderedDict()
if self._report_type and self._report_date:
body["name"] = f"{self._report_type} report {self._report_date}"
if self._report_date:
body["startDate"] = self._report_date
body["endDate"] = self._report_date
if self._report_type:
body["configuration"] = {"adProduct": "SPONSORED_PRODUCTS", "groupBy": self._report_config_group_by}
if self._metrics:
body["configuration"]["columns"] = self._metrics
if self._report_type:
body["configuration"]["reportTypeId"] = self._report_config_report_type_id
body["configuration"]["filters"] = self._report_config_filters
body["configuration"]["timeUnit"] = "SUMMARY"
body["configuration"]["format"] = "GZIP_JSON"
return json.dumps(body)
def with_report_date(self, report_date: AirbyteDateTime) -> "SponsoredProductsReportRequestBuilder":
self._report_date = report_date.strftime("%Y-%m-%d")
return self
def with_report_type(self, report_type: str) -> "SponsoredProductsReportRequestBuilder":
self._report_type = report_type
return self
def with_tactics(self, tactics: str) -> "SponsoredProductsReportRequestBuilder":
self._tactics = tactics
return self
def with_metrics(self, metrics: List[str]) -> "SponsoredProductsReportRequestBuilder":
self._metrics = metrics
return self
| SponsoredProductsReportRequestBuilder |
python | spyder-ide__spyder | spyder/widgets/mixins.py | {
"start": 63357,
"end": 65111
} | class ____(object):
INITHISTORY = None
SEPARATOR = None
HISTORY_FILENAMES = []
sig_append_to_history_requested = None
def __init__(self, history_filename=''):
self.history_filename = history_filename
self.create_history_filename()
def create_history_filename(self):
"""Create history_filename with INITHISTORY if it doesn't exist."""
if self.history_filename and not osp.isfile(self.history_filename):
try:
encoding.writelines(self.INITHISTORY, self.history_filename)
except EnvironmentError:
pass
def add_to_history(self, command):
"""Add command to history"""
command = str(command)
if command in ['', '\n'] or command.startswith('Traceback'):
return
if command.endswith('\n'):
command = command[:-1]
self.histidx = None
if len(self.history) > 0 and self.history[-1] == command:
return
self.history.append(command)
text = os.linesep + command
# When the first entry will be written in history file,
# the separator will be append first:
if self.history_filename not in self.HISTORY_FILENAMES:
self.HISTORY_FILENAMES.append(self.history_filename)
text = self.SEPARATOR + text
# Needed to prevent errors when writing history to disk
# See spyder-ide/spyder#6431.
try:
encoding.write(text, self.history_filename, mode='ab')
except EnvironmentError:
pass
if self.sig_append_to_history_requested is not None:
self.sig_append_to_history_requested.emit(
self.history_filename, text)
| SaveHistoryMixin |
python | cython__cython | tests/run/pyclass_scope_T671.py | {
"start": 133,
"end": 1625
} | class ____(object):
"""
>>> SimpleRewrite.A
4321
"""
A = 4321
A = A
def simple_inner(a):
"""
>>> simple_inner(4321).A
1234
"""
A = a
class X(object):
A = A
return X
def conditional(a, cond):
"""
>>> conditional(4321, False).A
1234
>>> conditional(4321, True).A
4321
"""
class X(object):
if cond:
A = a
A = A
return X
def name_error():
"""
>>> name_error() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...B...
"""
class X(object):
B = B
def conditional_name_error(cond):
"""
>>> conditional_name_error(True).B
4321
>>> conditional_name_error(False).B #doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...B...
"""
class X(object):
if cond:
B = 4321
B = B
return X
C = 1111
del C
def name_error_deleted():
"""
>>> name_error_deleted() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...C...
"""
class X(object):
C = C
_set = set
def name_lookup_order():
"""
>>> Scope = name_lookup_order()
>>> Scope().set(2)
42
>>> Scope.test1 == _set()
True
>>> Scope.test2 == _set()
True
"""
class Scope(object):
test1 = set()
test2 = set()
def set(self, x):
return 42
return Scope
| SimpleRewrite |
python | scrapy__scrapy | scrapy/spidermiddlewares/referer.py | {
"start": 1301,
"end": 3313
} | class ____(ABC):
"""Abstract base class for referrer policies."""
NOREFERRER_SCHEMES: tuple[str, ...] = LOCAL_SCHEMES
name: str
@abstractmethod
def referrer(self, response_url: str, request_url: str) -> str | None:
raise NotImplementedError
def stripped_referrer(self, url: str) -> str | None:
if urlparse(url).scheme not in self.NOREFERRER_SCHEMES:
return self.strip_url(url)
return None
def origin_referrer(self, url: str) -> str | None:
if urlparse(url).scheme not in self.NOREFERRER_SCHEMES:
return self.origin(url)
return None
def strip_url(self, url: str, origin_only: bool = False) -> str | None:
"""
https://www.w3.org/TR/referrer-policy/#strip-url
If url is null, return no referrer.
If url's scheme is a local scheme, then return no referrer.
Set url's username to the empty string.
Set url's password to null.
Set url's fragment to null.
If the origin-only flag is true, then:
Set url's path to null.
Set url's query to null.
Return url.
"""
if not url:
return None
return strip_url(
url,
strip_credentials=True,
strip_fragment=True,
strip_default_port=True,
origin_only=origin_only,
)
def origin(self, url: str) -> str | None:
"""Return serialized origin (scheme, host, path) for a request or response URL."""
return self.strip_url(url, origin_only=True)
def potentially_trustworthy(self, url: str) -> bool:
# Note: this does not follow https://w3c.github.io/webappsec-secure-contexts/#is-url-trustworthy
parsed_url = urlparse(url)
if parsed_url.scheme in ("data",):
return False
return self.tls_protected(url)
def tls_protected(self, url: str) -> bool:
return urlparse(url).scheme in ("https", "ftps")
| ReferrerPolicy |
python | pandas-dev__pandas | pandas/core/arrays/integer.py | {
"start": 5315,
"end": 5504
} | class ____(IntegerDtype):
type = np.int16
name: ClassVar[str] = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
@register_extension_dtype
@set_module("pandas")
| Int16Dtype |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 58379,
"end": 58593
} | class ____(TreeTestCase):
# https://github.com/django-mptt/django-mptt/issues/175
def test_save_auto_now_date_field_model(self):
a = AutoNowDateFieldModel()
a.save()
| TestAutoNowDateFieldModel |
python | scipy__scipy | scipy/io/arff/_arffread.py | {
"start": 9703,
"end": 19502
} | class ____(Attribute):
def __init__(self, name):
super().__init__(name)
self.type_name = 'relational'
self.dtype = np.object_
self.attributes = []
self.dialect = None
@classmethod
def parse_attribute(cls, name, attr_string):
"""
Parse the attribute line if it knows how. Returns the parsed
attribute, or None.
For date attributes, the attribute string would be like
'date <format>'.
"""
attr_string_lower = attr_string.lower().strip()
if attr_string_lower[:len('relational')] == 'relational':
return cls(name)
else:
return None
def parse_data(self, data_str):
# Copy-pasted
elems = list(range(len(self.attributes)))
escaped_string = data_str.encode().decode("unicode-escape")
row_tuples = []
for raw in escaped_string.split("\n"):
row, self.dialect = split_data_line(raw, self.dialect)
row_tuples.append(tuple(
[self.attributes[i].parse_data(row[i]) for i in elems]))
return np.array(row_tuples,
[(a.name, a.dtype) for a in self.attributes])
def __str__(self):
return (super().__str__() + '\n\t' +
'\n\t'.join(str(a) for a in self.attributes))
# -----------------
# Various utilities
# -----------------
def to_attribute(name, attr_string):
attr_classes = (NominalAttribute, NumericAttribute, DateAttribute,
StringAttribute, RelationalAttribute)
for cls in attr_classes:
attr = cls.parse_attribute(name, attr_string)
if attr is not None:
return attr
raise ParseArffError(f"unknown attribute {attr_string}")
def csv_sniffer_has_bug_last_field():
"""
Checks if the bug https://bugs.python.org/issue30157 is unpatched.
"""
# We only compute this once.
has_bug = getattr(csv_sniffer_has_bug_last_field, "has_bug", None)
if has_bug is None:
dialect = csv.Sniffer().sniff("3, 'a'")
csv_sniffer_has_bug_last_field.has_bug = dialect.quotechar != "'"
has_bug = csv_sniffer_has_bug_last_field.has_bug
return has_bug
def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
"""
Workaround for the bug https://bugs.python.org/issue30157 if is unpatched.
"""
if csv_sniffer_has_bug_last_field():
# Reuses code from the csv module
right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)' # noqa: E501
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?", # noqa: E501
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # .*?", # noqa: E501
right_regex, # ,".*?"
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) # noqa: E501
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(sniff_line)
if matches:
break
# If it does not match the expression that was bugged,
# then this bug does not apply
if restr != right_regex:
return
groupindex = regexp.groupindex
# There is only one end of the string
assert len(matches) == 1
m = matches[0]
n = groupindex['quote'] - 1
quote = m[n]
n = groupindex['delim'] - 1
delim = m[n]
n = groupindex['space'] - 1
space = bool(m[n])
dq_regexp = re.compile(
rf"(({re.escape(delim)})|^)\W*{quote}[^{re.escape(delim)}\n]*{quote}[^{re.escape(delim)}\n]*{quote}\W*(({re.escape(delim)})|$)", re.MULTILINE # noqa: E501
)
doublequote = bool(dq_regexp.search(sniff_line))
dialect.quotechar = quote
if delim in delimiters:
dialect.delimiter = delim
dialect.doublequote = doublequote
dialect.skipinitialspace = space
def split_data_line(line, dialect=None):
    """
    Split a single ARFF ``@data`` line into its fields.

    Parameters
    ----------
    line : str
        One line of the data section (a trailing newline is allowed).
    dialect : csv.Dialect, optional
        Dialect to parse with.  If None, the dialect is sniffed from the
        line and the Sniffer bug workaround is applied.

    Returns
    -------
    row : list of str
        The parsed fields.
    dialect : csv.Dialect
        The dialect that was used, so callers can reuse it for
        subsequent lines without re-sniffing.
    """
    delimiters = ",\t"

    # This can not be done in a per reader basis, and relational fields
    # can be HUGE
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))

    # Remove the line end if any.  endswith() is used instead of
    # indexing line[-1] so that an empty string does not raise
    # IndexError.
    if line.endswith('\n'):
        line = line[:-1]

    # Remove surrounding whitespace (leading and trailing)
    line = line.strip()

    sniff_line = line

    # Add a delimiter if none is present, so that the csv.Sniffer
    # does not complain for a single-field CSV.
    if not any(d in line for d in delimiters):
        sniff_line += ","

    if dialect is None:
        dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
        workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line,
                                              dialect=dialect,
                                              delimiters=delimiters)

    row = next(csv.reader([line], dialect))

    return row, dialect
# --------------
# Parsing header
# --------------
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in header (e.g., starts by @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space like characters before @attribute are allowed)
    * Works also if the attribute is spread on multilines.
    * Works if empty lines or comments are in between

    Parameters
    ----------
    iterable : iterator of str
        Iterator over the remaining header lines; it is advanced past
        the attribute definition (and past any nested relational
        attributes).
    attribute : str
        The attribute line to parse, e.g. ``"@attribute floupi real"``.

    Returns
    -------
    attribute : object
        The parsed attribute object built by ``to_attribute`` from the
        attribute's name and type string (note: not the raw name/type
        pair).
    next_item : str
        The next line to be parsed, already consumed from *iterable*.

    Raises
    ------
    ValueError
        If the line does not start with @attribute, or if the
        name/type pair is spread over multiple lines (unsupported).
    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        if r_comattrval.match(atrv):
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
            raise ValueError("multi line not supported yet")
    else:
        raise ValueError(f"First line unparsable: {sattr}")
    attribute = to_attribute(name, type)
    # Relational attributes contain a nested attribute list that runs
    # until a matching "@end <name>" line; consume it here.
    if type.lower() == 'relational':
        next_item = read_relational_attribute(iterable, attribute, next_item)
    # raise ValueError("relational attributes not supported yet")
    return attribute, next_item
def tokenize_single_comma(val):
    """Split a ``name,type`` attribute declaration into its two parts.

    Returns the stripped (name, type) pair, raising ValueError if the
    string cannot be tokenized.
    """
    # NOTE: the caller has already matched this same regex; matching a
    # second time is wasteful but keeps the call sites simple.
    match = r_comattrval.match(val)
    if not match:
        raise ValueError(f"Error while tokenizing single {val}")
    try:
        return match.group(1).strip(), match.group(2).strip()
    except IndexError as exc:
        raise ValueError("Error while tokenizing attribute") from exc
def tokenize_single_wcomma(val):
    """Split a whitespace-separated ``name type`` attribute declaration.

    Returns the stripped (name, type) pair, raising ValueError if the
    string cannot be tokenized.
    """
    # NOTE: the caller has already matched this same regex; matching a
    # second time is wasteful but keeps the call sites simple.
    match = r_wcomattrval.match(val)
    if not match:
        raise ValueError(f"Error while tokenizing single {val}")
    try:
        return match.group(1).strip(), match.group(2).strip()
    except IndexError as exc:
        raise ValueError("Error while tokenizing attribute") from exc
def read_relational_attribute(ofile, relational_attribute, i):
    """Read the nested attributes of a relational attribute.

    Consumes lines from *ofile* until the matching ``@end <name>``
    marker, appending each parsed nested attribute to
    ``relational_attribute.attributes``.

    Parameters
    ----------
    ofile : iterator of str
        Iterator over the header lines.
    relational_attribute : object
        The enclosing relational attribute; must expose ``name`` and a
        mutable ``attributes`` list.
    i : str
        The current line (first line inside the relational block, or
        the end marker itself).

    Returns
    -------
    str
        The first line after the ``@end`` marker.
    """
    # Escape the attribute name so regex metacharacters in it (e.g.
    # '+', '.', '(') are matched literally; previously such a name would
    # corrupt the end-marker pattern and the loop would never terminate
    # correctly.
    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
                                  re.escape(relational_attribute.name) +
                                  r'\s*$')

    while not r_end_relational.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                attr, i = tokenize_attribute(ofile, i)
                relational_attribute.attributes.append(attr)
            else:
                raise ValueError(f"Error parsing line {i}")
        else:
            i = next(ofile)

    i = next(ofile)
    return i
def read_header(ofile):
    """Read the ARFF header from the line iterator *ofile*.

    Skips leading comments, then collects the relation name and the
    attribute definitions until the ``@data`` marker is reached.

    Returns
    -------
    relation : str or None
        The value of the ``@relation`` line, if one was seen.
    attributes : list
        The parsed attribute objects, in declaration order.
    """
    line = next(ofile)

    # Skip any leading comment lines.
    while r_comment.match(line):
        line = next(ofile)

    # Everything up to the @data marker belongs to the header.
    relation = None
    attributes = []
    while not r_datameta.match(line):
        if r_headerline.match(line):
            if r_attribute.match(line):
                # tokenize_attribute already consumes the following
                # line(s), so do not advance the iterator here.
                attr, line = tokenize_attribute(ofile, line)
                attributes.append(attr)
                continue
            matched_relation = r_relation.match(line)
            if matched_relation is None:
                raise ValueError(f"Error parsing line {line}")
            relation = matched_relation.group(1)
        line = next(ofile)

    return relation, attributes
| RelationalAttribute |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 62200,
"end": 62390
} | class ____(themeable):
"""
Horizontal spacing between two entries in a legend
Parameters
----------
theme_element : int
Size in points
"""
| legend_key_spacing_x |
python | mkdocs__mkdocs | mkdocs/structure/pages.py | {
"start": 20522,
"end": 21104
} | class ____(markdown.preprocessors.Preprocessor):
def __init__(self) -> None:
super().__init__()
self.present_anchor_ids: set[str] = set()
def run(self, lines: list[str]) -> list[str]:
parser = _HTMLHandler()
parser.feed('\n'.join(lines))
parser.close()
self.present_anchor_ids = parser.present_anchor_ids
return lines
def _register(self, md: markdown.Markdown) -> None:
md.preprocessors.register(
self, "mkdocs_raw_html", priority=21 # Right before 'html_block'.
)
| _RawHTMLPreprocessor |
python | doocs__leetcode | solution/1200-1299/1230.Toss Strange Coins/Solution.py | {
"start": 0,
"end": 425
} | class ____:
def probabilityOfHeads(self, prob: List[float], target: int) -> float:
n = len(prob)
f = [[0] * (target + 1) for _ in range(n + 1)]
f[0][0] = 1
for i, p in enumerate(prob, 1):
for j in range(min(i, target) + 1):
f[i][j] = (1 - p) * f[i - 1][j]
if j:
f[i][j] += p * f[i - 1][j - 1]
return f[n][target]
| Solution |
python | Pylons__pyramid | tests/test_scripts/test_prequest.py | {
"start": 8832,
"end": 9086
} | class ____(unittest.TestCase):
def _callFUT(self, argv):
from pyramid.scripts.prequest import main
return main(argv, True)
def test_it(self):
result = self._callFUT(['prequest'])
self.assertEqual(result, 2)
| Test_main |
python | aio-libs__aiohttp | aiohttp/web_urldispatcher.py | {
"start": 29487,
"end": 30202
} | class ____(AbstractView):
async def _iter(self) -> StreamResponse:
if self.request.method not in hdrs.METH_ALL:
self._raise_allowed_methods()
method: Callable[[], Awaitable[StreamResponse]] | None = getattr(
self, self.request.method.lower(), None
)
if method is None:
self._raise_allowed_methods()
return await method()
def __await__(self) -> Generator[None, None, StreamResponse]:
return self._iter().__await__()
def _raise_allowed_methods(self) -> NoReturn:
allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
| View |
python | catalyst-team__catalyst | catalyst/contrib/datasets/imagewoof.py | {
"start": 466,
"end": 928
} | class ____(ImageClassificationDataset):
"""
`Imagewoof <https://github.com/fastai/imagenette#imagewoof>`_ Dataset
with images resized so that the shortest size is 160 px.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewoof2-160"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewoof2-160.tgz",
"3d200a7be99704a0d7509be2a9fbfe15",
)
]
| Imagewoof160 |
python | Textualize__rich | tests/test_progress.py | {
"start": 627,
"end": 23767
} | class ____:
"""A clock that is manually advanced."""
def __init__(self, time=0.0, auto=True) -> None:
self.time = time
self.auto = auto
def __call__(self) -> float:
try:
return self.time
finally:
if self.auto:
self.time += 1
def tick(self, advance: float = 1) -> None:
self.time += advance
def test_bar_columns():
bar_column = BarColumn(100)
assert bar_column.bar_width == 100
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
bar = bar_column(task)
assert isinstance(bar, ProgressBar)
assert bar.completed == 20
assert bar.total == 100
def test_text_column():
text_column = TextColumn("[b]foo", highlighter=NullHighlighter())
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
text = text_column.render(task)
assert str(text) == "foo"
text_column = TextColumn("[b]bar", markup=False)
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
text = text_column.render(task)
assert text == Text("[b]bar")
def test_time_elapsed_column():
column = TimeElapsedColumn()
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
text = column.render(task)
assert str(text) == "-:--:--"
def test_time_remaining_column():
class FakeTask(Task):
time_remaining = 60
column = TimeRemainingColumn()
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
text = column(task)
assert str(text) == "-:--:--"
text = column(FakeTask(1, "test", 100, 20, _get_time=lambda: 1.0))
assert str(text) == "0:01:00"
@pytest.mark.parametrize(
"task_time, formatted",
[
(None, "--:--"),
(0, "00:00"),
(59, "00:59"),
(71, "01:11"),
(4210, "1:10:10"),
],
)
def test_compact_time_remaining_column(task_time, formatted):
task = SimpleNamespace(finished=False, time_remaining=task_time, total=100)
column = TimeRemainingColumn(compact=True)
assert str(column.render(task)) == formatted
def test_time_remaining_column_elapsed_when_finished():
task_time = 71
formatted = "0:01:11"
task = SimpleNamespace(finished=True, finished_time=task_time, total=100)
column = TimeRemainingColumn(elapsed_when_finished=True)
assert str(column.render(task)) == formatted
def test_renderable_column():
column = RenderableColumn("foo")
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
assert column.render(task) == "foo"
def test_spinner_column():
time = 1.0
def get_time():
nonlocal time
return time
column = SpinnerColumn()
column.set_spinner("dots2")
task = Task(1, "test", 100, 20, _get_time=get_time)
result = column.render(task)
print(repr(result))
expected = "⣾"
assert str(result) == expected
time += 1.0
column.spinner.update(speed=0.5)
result = column.render(task)
print(repr(result))
expected = "⡿"
assert str(result) == expected
def test_download_progress_uses_decimal_units() -> None:
column = DownloadColumn()
test_task = Task(1, "test", 1000, 500, _get_time=lambda: 1.0)
rendered_progress = str(column.render(test_task))
expected = "0.5/1.0 kB"
assert rendered_progress == expected
def test_download_progress_uses_binary_units() -> None:
column = DownloadColumn(binary_units=True)
test_task = Task(1, "test", 1024, 512, _get_time=lambda: 1.0)
rendered_progress = str(column.render(test_task))
expected = "0.5/1.0 KiB"
assert rendered_progress == expected
def test_task_ids():
progress = make_progress()
assert progress.task_ids == [0, 1, 2, 4]
def test_finished():
progress = make_progress()
assert not progress.finished
def make_progress() -> Progress:
_time = 0.0
def fake_time():
nonlocal _time
try:
return _time
finally:
_time += 1
console = Console(
file=io.StringIO(),
force_terminal=True,
color_system="truecolor",
width=80,
legacy_windows=False,
_environ={},
)
progress = Progress(console=console, get_time=fake_time, auto_refresh=False)
task1 = progress.add_task("foo")
task2 = progress.add_task("bar", total=30)
progress.advance(task2, 16)
task3 = progress.add_task("baz", visible=False)
task4 = progress.add_task("egg")
progress.remove_task(task4)
task4 = progress.add_task("foo2", completed=50, start=False)
progress.stop_task(task4)
progress.start_task(task4)
progress.update(
task4, total=200, advance=50, completed=200, visible=True, refresh=True
)
progress.stop_task(task4)
return progress
def render_progress() -> str:
progress = make_progress()
progress.start() # superfluous noop
with progress:
pass
progress.stop() # superfluous noop
progress_render = progress.console.file.getvalue()
return progress_render
def test_expand_bar() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=10,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
progress = Progress(
BarColumn(bar_width=None),
console=console,
get_time=lambda: 1.0,
auto_refresh=False,
)
progress.add_task("foo")
with progress:
pass
expected = "\x1b[?25l\x1b[38;5;237m━━━━━━━━━━\x1b[0m\r\x1b[2K\x1b[38;5;237m━━━━━━━━━━\x1b[0m\n\x1b[?25h"
render_result = console.file.getvalue()
print("RESULT\n", repr(render_result))
print("EXPECTED\n", repr(expected))
assert render_result == expected
def test_progress_with_none_total_renders_a_pulsing_bar() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=10,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
progress = Progress(
BarColumn(bar_width=None),
console=console,
get_time=lambda: 1.0,
auto_refresh=False,
)
progress.add_task("foo", total=None)
with progress:
pass
expected = "\x1b[?25l\x1b[38;2;153;48;86m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;249;38;114m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\r\x1b[2K\x1b[38;2;153;48;86m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;249;38;114m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\n\x1b[?25h"
render_result = console.file.getvalue()
print("RESULT\n", repr(render_result))
print("EXPECTED\n", repr(expected))
assert render_result == expected
def test_render() -> None:
expected = "\x1b[?25lfoo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 0%\x1b[0m \x1b[36m-:--:--\x1b[0m\nbar \x1b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━━\x1b[0m\x1b[38;5;237m╺\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 53%\x1b[0m \x1b[36m-:--:--\x1b[0m\nfoo2 \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[36m0:00:00\x1b[0m\r\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2Kfoo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 0%\x1b[0m \x1b[36m-:--:--\x1b[0m\nbar \x1b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━━\x1b[0m\x1b[38;5;237m╺\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 53%\x1b[0m \x1b[36m-:--:--\x1b[0m\nfoo2 \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[36m0:00:00\x1b[0m\n\x1b[?25h"
render_result = render_progress()
print(repr(render_result))
assert render_result == expected
def test_track() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=60,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
test = ["foo", "bar", "baz"]
expected_values = iter(test)
for value in track(
test, "test", console=console, auto_refresh=False, get_time=MockClock(auto=True)
):
assert value == next(expected_values)
result = console.file.getvalue()
print(repr(result))
expected = "\x1b[?25l\r\x1b[2Ktest \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 0%\x1b[0m \x1b[36m-:--:--\x1b[0m\r\x1b[2Ktest \x1b[38;2;249;38;114m━━━━━━━━━━━━━\x1b[0m\x1b[38;5;237m╺\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 33%\x1b[0m \x1b[36m-:--:--\x1b[0m\r\x1b[2Ktest \x1b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m\x1b[38;2;249;38;114m╸\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━\x1b[0m \x1b[35m 67%\x1b[0m \x1b[36m0:00:06\x1b[0m\r\x1b[2Ktest \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[33m0:00:19\x1b[0m\r\x1b[2Ktest \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[33m0:00:19\x1b[0m\n\x1b[?25h"
print("--")
print("RESULT:")
print(result)
print(repr(result))
print("EXPECTED:")
print(expected)
print(repr(expected))
assert result == expected
def test_progress_track() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=60,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
progress = Progress(
console=console, auto_refresh=False, get_time=MockClock(auto=True)
)
test = ["foo", "bar", "baz"]
expected_values = iter(test)
with progress:
for value in progress.track(test, description="test"):
assert value == next(expected_values)
result = console.file.getvalue()
print(repr(result))
expected = "\x1b[?25l\r\x1b[2Ktest \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 0%\x1b[0m \x1b[36m-:--:--\x1b[0m\r\x1b[2Ktest \x1b[38;2;249;38;114m━━━━━━━━━━━━━\x1b[0m\x1b[38;5;237m╺\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 33%\x1b[0m \x1b[36m-:--:--\x1b[0m\r\x1b[2Ktest \x1b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m\x1b[38;2;249;38;114m╸\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━\x1b[0m \x1b[35m 67%\x1b[0m \x1b[36m0:00:06\x1b[0m\r\x1b[2Ktest \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[36m0:00:00\x1b[0m\r\x1b[2Ktest \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[36m0:00:00\x1b[0m\n\x1b[?25h"
print(expected)
print(repr(expected))
print(result)
print(repr(result))
assert result == expected
def test_columns() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=80,
log_time_format="[TIME]",
color_system="truecolor",
legacy_windows=False,
log_path=False,
_environ={},
)
progress = Progress(
"test",
TextColumn("{task.description}"),
BarColumn(bar_width=None),
TimeRemainingColumn(),
TimeElapsedColumn(),
FileSizeColumn(),
TotalFileSizeColumn(),
DownloadColumn(),
TransferSpeedColumn(),
MofNCompleteColumn(),
MofNCompleteColumn(separator=" of "),
transient=True,
console=console,
auto_refresh=False,
get_time=MockClock(),
)
task1 = progress.add_task("foo", total=10)
task2 = progress.add_task("bar", total=7)
with progress:
for n in range(4):
progress.advance(task1, 3)
progress.advance(task2, 4)
print("foo")
console.log("hello")
console.print("world")
progress.refresh()
from .render import replace_link_ids
result = replace_link_ids(console.file.getvalue())
print(repr(result))
expected = "\x1b[?25ltest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kfoo\ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2K\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0mhello \ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kworld\ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━\x1b[0m 
\x1b[36m0:00:00\x1b[0m \x1b[33m0:00:34\x1b[0m \x1b[32m12 \x1b[0m \x1b[32m10 \x1b[0m \x1b[32m12/10 \x1b[0m \x1b[31m1 \x1b[0m \x1b[32m12/10\x1b[0m \x1b[32m12 of 10\x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbyte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:29\x1b[0m \x1b[32m16 \x1b[0m \x1b[32m7 bytes\x1b[0m \x1b[32m16/7 \x1b[0m \x1b[31m2 \x1b[0m \x1b[32m16/7 \x1b[0m \x1b[32m16 of 7 \x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbytes/s\x1b[0m \r\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:34\x1b[0m \x1b[32m12 \x1b[0m \x1b[32m10 \x1b[0m \x1b[32m12/10 \x1b[0m \x1b[31m1 \x1b[0m \x1b[32m12/10\x1b[0m \x1b[32m12 of 10\x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbyte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:29\x1b[0m \x1b[32m16 \x1b[0m \x1b[32m7 bytes\x1b[0m \x1b[32m16/7 \x1b[0m \x1b[31m2 \x1b[0m \x1b[32m16/7 \x1b[0m \x1b[32m16 of 7 \x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbytes/s\x1b[0m \n\x1b[?25h\r\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K"
assert result == expected
def test_using_default_columns() -> None:
# can only check types, as the instances do not '==' each other
expected_default_types = [
TextColumn,
BarColumn,
TaskProgressColumn,
TimeRemainingColumn,
]
progress = Progress()
assert [type(c) for c in progress.columns] == expected_default_types
progress = Progress(
SpinnerColumn(),
*Progress.get_default_columns(),
"Elapsed:",
TimeElapsedColumn(),
)
assert [type(c) for c in progress.columns] == [
SpinnerColumn,
*expected_default_types,
str,
TimeElapsedColumn,
]
def test_task_create() -> None:
task = Task(TaskID(1), "foo", 100, 0, _get_time=lambda: 1)
assert task.elapsed is None
assert not task.finished
assert task.percentage == 0.0
assert task.speed is None
assert task.time_remaining is None
def test_task_start() -> None:
current_time = 1
def get_time():
nonlocal current_time
return current_time
task = Task(TaskID(1), "foo", 100, 0, _get_time=get_time)
task.start_time = get_time()
assert task.started == True
assert task.elapsed == 0
current_time += 1
assert task.elapsed == 1
current_time += 1
task.stop_time = get_time()
current_time += 1
assert task.elapsed == 2
def test_task_zero_total() -> None:
task = Task(TaskID(1), "foo", 0, 0, _get_time=lambda: 1)
assert task.percentage == 0
def test_progress_create() -> None:
progress = Progress()
assert progress.finished
assert progress.tasks == []
assert progress.task_ids == []
def test_track_thread() -> None:
progress = Progress()
task_id = progress.add_task("foo")
track_thread = _TrackThread(progress, task_id, 0.1)
assert track_thread.completed == 0
from time import sleep
with track_thread:
track_thread.completed = 1
sleep(0.3)
assert progress.tasks[task_id].completed >= 1
track_thread.completed += 1
def test_reset() -> None:
progress = Progress()
task_id = progress.add_task("foo")
progress.advance(task_id, 1)
progress.advance(task_id, 1)
progress.advance(task_id, 1)
progress.advance(task_id, 7)
task = progress.tasks[task_id]
assert task.completed == 10
progress.reset(
task_id,
total=200,
completed=20,
visible=False,
description="bar",
example="egg",
)
assert task.total == 200
assert task.completed == 20
assert task.visible == False
assert task.description == "bar"
assert task.fields == {"example": "egg"}
assert not task._progress
def test_progress_max_refresh() -> None:
"""Test max_refresh argument."""
time = 0.0
def get_time() -> float:
nonlocal time
try:
return time
finally:
time = time + 1.0
console = Console(
color_system=None,
width=80,
legacy_windows=False,
force_terminal=True,
_environ={},
)
column = TextColumn("{task.description}")
column.max_refresh = 3
progress = Progress(
column,
get_time=get_time,
auto_refresh=False,
console=console,
)
console.begin_capture()
with progress:
task_id = progress.add_task("start")
for tick in range(6):
progress.update(task_id, description=f"tick {tick}")
progress.refresh()
result = console.end_capture()
print(repr(result))
assert (
result
== "\x1b[?25l\r\x1b[2Kstart\r\x1b[2Kstart\r\x1b[2Ktick 1\r\x1b[2Ktick 1\r\x1b[2Ktick 3\r\x1b[2Ktick 3\r\x1b[2Ktick 5\r\x1b[2Ktick 5\n\x1b[?25h"
)
def test_live_is_started_if_progress_is_enabled() -> None:
progress = Progress(auto_refresh=False, disable=False)
with progress:
assert progress.live._started
def test_live_is_not_started_if_progress_is_disabled() -> None:
progress = Progress(auto_refresh=False, disable=True)
with progress:
assert not progress.live._started
def test_no_output_if_progress_is_disabled() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=60,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
progress = Progress(
console=console,
disable=True,
)
test = ["foo", "bar", "baz"]
expected_values = iter(test)
with progress:
for value in progress.track(test, description="test"):
assert value == next(expected_values)
result = console.file.getvalue()
print(repr(result))
expected = ""
assert result == expected
def test_open() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=60,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
progress = Progress(
console=console,
)
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, "wb") as f:
f.write(b"Hello, World!")
try:
with rich.progress.open(filename) as f:
assert f.read() == "Hello, World!"
assert f.closed
finally:
os.remove(filename)
def test_open_text_mode() -> None:
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, "wb") as f:
f.write(b"Hello, World!")
try:
with rich.progress.open(filename, "r") as f:
assert f.read() == "Hello, World!"
assert f.name == filename
assert f.closed
finally:
os.remove(filename)
def test_wrap_file() -> None:
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, "wb") as f:
total = f.write(b"Hello, World!")
try:
with open(filename, "rb") as file:
with rich.progress.wrap_file(file, total=total) as f:
assert f.read() == b"Hello, World!"
assert f.mode == "rb"
assert f.name == filename
assert f.closed
assert not f.handle.closed
assert not file.closed
assert file.closed
finally:
os.remove(filename)
def test_wrap_file_task_total() -> None:
console = Console(
file=io.StringIO(),
force_terminal=True,
width=60,
color_system="truecolor",
legacy_windows=False,
_environ={},
)
progress = Progress(
console=console,
)
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, "wb") as f:
total = f.write(b"Hello, World!")
try:
with progress:
with open(filename, "rb") as file:
task_id = progress.add_task("Reading", total=total)
with progress.wrap_file(file, task_id=task_id) as f:
assert f.read() == b"Hello, World!"
finally:
os.remove(filename)
def test_task_progress_column_speed() -> None:
speed_text = TaskProgressColumn.render_speed(None)
assert speed_text.plain == ""
speed_text = TaskProgressColumn.render_speed(5)
assert speed_text.plain == "5.0 it/s"
speed_text = TaskProgressColumn.render_speed(5000)
assert speed_text.plain == "5.0×10³ it/s"
speed_text = TaskProgressColumn.render_speed(8888888)
assert speed_text.plain == "8.9×10⁶ it/s"
if __name__ == "__main__":
_render = render_progress()
print(_render)
print(repr(_render))
| MockClock |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_object.py | {
"start": 497,
"end": 2421
} | class ____:
def test_comparison_object_numeric_nas(self, comparison_op):
ser = Series(np.random.default_rng(2).standard_normal(10), dtype=object)
shifted = ser.shift(2)
func = comparison_op
result = func(ser, shifted)
expected = func(ser.astype(float), shifted.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
def test_object_comparisons(self, infer_string):
with option_context("future.infer_string", infer_string):
ser = Series(["a", "b", np.nan, "c", "a"])
result = ser == "a"
expected = Series([True, False, False, False, True])
tm.assert_series_equal(result, expected)
result = ser < "a"
expected = Series([False, False, False, False, False])
tm.assert_series_equal(result, expected)
result = ser != "a"
expected = -(ser == "a")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_more_na_comparisons(self, dtype):
left = Series(["a", np.nan, "c"], dtype=dtype)
right = Series(["a", np.nan, "d"], dtype=dtype)
result = left == right
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
tm.assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
tm.assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Arithmetic
| TestObjectComparisons |
python | spack__spack | lib/spack/spack/variant.py | {
"start": 28924,
"end": 29239
} | class ____(spack.error.SpecError):
"""Raised if the wrong validator is used to validate a variant."""
def __init__(self, vspec, variant):
msg = 'trying to validate variant "{0.name}" ' 'with the validator of "{1.name}"'
super().__init__(msg.format(vspec, variant))
| InconsistentValidationError |
python | Pylons__pyramid | tests/test_authentication.py | {
"start": 16847,
"end": 19437
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.authentication import RemoteUserAuthenticationPolicy
return RemoteUserAuthenticationPolicy
def _makeOne(self, environ_key='REMOTE_USER', callback=None):
return self._getTargetClass()(environ_key, callback)
def test_class_implements_IAuthenticationPolicy(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IAuthenticationPolicy
verifyClass(IAuthenticationPolicy, self._getTargetClass())
def test_instance_implements_IAuthenticationPolicy(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IAuthenticationPolicy
verifyObject(IAuthenticationPolicy, self._makeOne())
def test_unauthenticated_userid_returns_None(self):
request = DummyRequest({})
policy = self._makeOne()
self.assertEqual(policy.unauthenticated_userid(request), None)
def test_unauthenticated_userid(self):
request = DummyRequest({'REMOTE_USER': 'fred'})
policy = self._makeOne()
self.assertEqual(policy.unauthenticated_userid(request), 'fred')
def test_authenticated_userid_None(self):
request = DummyRequest({})
policy = self._makeOne()
self.assertEqual(policy.authenticated_userid(request), None)
def test_authenticated_userid(self):
request = DummyRequest({'REMOTE_USER': 'fred'})
policy = self._makeOne()
self.assertEqual(policy.authenticated_userid(request), 'fred')
def test_effective_principals_None(self):
from pyramid.authorization import Everyone
request = DummyRequest({})
policy = self._makeOne()
self.assertEqual(policy.effective_principals(request), [Everyone])
def test_effective_principals(self):
from pyramid.authorization import Authenticated, Everyone
request = DummyRequest({'REMOTE_USER': 'fred'})
policy = self._makeOne()
self.assertEqual(
policy.effective_principals(request),
[Everyone, Authenticated, 'fred'],
)
def test_remember(self):
request = DummyRequest({'REMOTE_USER': 'fred'})
policy = self._makeOne()
result = policy.remember(request, 'fred')
self.assertEqual(result, [])
def test_forget(self):
request = DummyRequest({'REMOTE_USER': 'fred'})
policy = self._makeOne()
result = policy.forget(request)
self.assertEqual(result, [])
| TestRemoteUserAuthenticationPolicy |
python | openai__openai-python | src/openai/types/beta/thread_create_and_run_params.py | {
"start": 14142,
"end": 14464
} | class ____(ThreadCreateAndRunParamsBase, total=False):
stream: Optional[Literal[False]]
"""
If `true`, returns a stream of events that happen during the Run as server-sent
events, terminating when the Run enters a terminal state with a `data: [DONE]`
message.
"""
| ThreadCreateAndRunParamsNonStreaming |
python | ipython__ipython | IPython/testing/plugin/ipdoctest.py | {
"start": 2939,
"end": 11412
} | class ____(doctest.DocTestParser):
"""
A class used to parse strings containing doctest examples.
Note: This is a version modified to properly recognize IPython input and
convert any IPython examples into valid Python ones.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# Classic Python prompts or default IPython ones
_PS1_PY = r'>>>'
_PS2_PY = r'\.\.\.'
_PS1_IP = r'In\ \[\d+\]:'
_PS2_IP = r'\ \ \ \.\.\.+:'
_RE_TPL = r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
(?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
\n? # a newline
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*%s) # Not a line starting with PS1
(?![ ]*%s) # Not a line starting with PS2
.*$\n? # But any other line
)*)
'''
_EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
re.MULTILINE | re.VERBOSE)
_EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
re.MULTILINE | re.VERBOSE)
# Mark a test as being fully random. In this case, we simply append the
# random marker ('#random') to each individual example's output. This way
# we don't need to modify any other code.
_RANDOM_TEST = re.compile(r'#\s*all-random\s+')
def ip2py(self,source):
"""Convert input IPython source into valid Python."""
block = _ip.input_transformer_manager.transform_cell(source)
if len(block.splitlines()) == 1:
return _ip.prefilter(block)
else:
return block
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
# print('Parse string:\n',string) # dbg
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# We make 'all random' tests by adding the '# random' mark to every
# block of output in the test.
if self._RANDOM_TEST.search(string):
random_marker = '\n# random'
else:
random_marker = ''
# Whether to convert the input from ipython to python syntax
ip2py = False
# Find all doctest examples in the string. First, try them as Python
# examples, then as IPython ones
terms = list(self._EXAMPLE_RE_PY.finditer(string))
if terms:
# Normal Python example
Example = doctest.Example
else:
# It's an ipython example.
terms = list(self._EXAMPLE_RE_IP.finditer(string))
Example = IPExample
ip2py = True
for m in terms:
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno,ip2py)
# Append the random-output marker (it defaults to empty in most
# cases, it's only non-empty for 'all-random' tests):
want += random_marker
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append(Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options))
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def _parse_example(self, m, name, lineno,ip2py=False):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
Optional:
`ip2py`: if true, filter the input via IPython to convert the syntax
into valid python.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
# We're using variable-length input prompts
ps1 = m.group('ps1')
ps2 = m.group('ps2')
ps1_len = len(ps1)
self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
if ps2:
self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
if ip2py:
# Convert source input from IPython into valid Python syntax
source = self.ip2py(source)
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
# Remove ipython output prompt that might be present in the first line
want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
Note: IPython-modified version which takes the input prompt length as a
parameter, so that prompts of variable length can be dealt with.
"""
space_idx = indent+ps1_len
min_len = space_idx+1
for i, line in enumerate(lines):
if len(line) >= min_len and line[space_idx] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:space_idx], line))
SKIP = doctest.register_optionflag('SKIP')
| IPDocTestParser |
python | encode__django-rest-framework | tests/test_serializer.py | {
"start": 23891,
"end": 24476
} | class ____:
def test_serializer_context(self):
class NestedSerializer(serializers.Serializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# .context should not cache
self.context
class ParentSerializer(serializers.Serializer):
nested = NestedSerializer()
serializer = ParentSerializer(data={}, context={'foo': 'bar'})
assert serializer.context == {'foo': 'bar'}
assert serializer.fields['nested'].context == {'foo': 'bar'}
| Test2555Regression |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_avatar.py | {
"start": 8719,
"end": 9184
} | class ____(SentryAppAvatarTestBase):
def test_delete(self) -> None:
"""Test that when the related sentryapp is deleted (not really deleted, but date_deleted is set), the associated avatars are deleted"""
self.create_avatar(is_color=True)
self.create_avatar(is_color=False)
assert SentryAppAvatar.objects.count() == 2
self.unpublished_app.delete()
assert SentryAppAvatar.objects.count() == 0
| SentryAppAvatarDeleteTest |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/jupyterhub/tests.py | {
"start": 248,
"end": 812
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = JupyterHubProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"kind": "user",
"name": "abc",
"admin": false,
"groups": [],
"server": null,
"pending": null,
"created": "2016-12-06T18:30:50.297567Z",
"last_activity": "2017-02-07T17:29:36.470236Z",
"servers": null}
""",
)
def get_expected_to_str(self):
return "abc"
| JupyterHubTests |
python | scikit-image__scikit-image | src/skimage/future/trainable_segmentation.py | {
"start": 287,
"end": 5604
} | class ____:
"""Estimator for classifying pixels.
Parameters
----------
clf : classifier object, optional
classifier object, exposing a ``fit`` and a ``predict`` method as in
scikit-learn's API, for example an instance of
``RandomForestClassifier`` or ``LogisticRegression`` classifier.
features_func : function, optional
function computing features on all pixels of the image, to be passed
to the classifier. The output should be of shape
``(m_features, *labels.shape)``. If None,
:func:`skimage.feature.multiscale_basic_features` is used.
Methods
-------
compute_features
fit
predict
"""
def __init__(self, clf=None, features_func=None):
if clf is None:
if has_sklearn:
self.clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
else:
raise ImportError(
"Please install scikit-learn or pass a classifier instance"
"to TrainableSegmenter."
)
else:
self.clf = clf
self.features_func = features_func
def compute_features(self, image):
if self.features_func is None:
self.features_func = multiscale_basic_features
self.features = self.features_func(image)
def fit(self, image, labels):
"""Train classifier using partially labeled (annotated) image.
Parameters
----------
image : ndarray
Input image, which can be grayscale or multichannel, and must have a
number of dimensions compatible with ``self.features_func``.
labels : ndarray of ints
Labeled array of shape compatible with ``image`` (same shape for a
single-channel image). Labels >= 1 correspond to the training set and
label 0 to unlabeled pixels to be segmented.
"""
self.compute_features(image)
fit_segmenter(labels, self.features, self.clf)
def predict(self, image):
"""Segment new image using trained internal classifier.
Parameters
----------
image : ndarray
Input image, which can be grayscale or multichannel, and must have a
number of dimensions compatible with ``self.features_func``.
Raises
------
NotFittedError if ``self.clf`` has not been fitted yet (use ``self.fit``).
"""
if self.features_func is None:
self.features_func = multiscale_basic_features
features = self.features_func(image)
return predict_segmenter(features, self.clf)
def fit_segmenter(labels, features, clf):
"""Segmentation using labeled parts of the image and a classifier.
Parameters
----------
labels : ndarray of ints
Image of labels. Labels >= 1 correspond to the training set and
label 0 to unlabeled pixels to be segmented.
features : ndarray
Array of features, with the first dimension corresponding to the number
of features, and the other dimensions correspond to ``labels.shape``.
clf : classifier object
classifier object, exposing a ``fit`` and a ``predict`` method as in
scikit-learn's API, for example an instance of
``RandomForestClassifier`` or ``LogisticRegression`` classifier.
Returns
-------
clf : classifier object
classifier trained on ``labels``
Raises
------
NotFittedError if ``self.clf`` has not been fitted yet (use ``self.fit``).
"""
mask = labels > 0
training_data = features[mask]
training_labels = labels[mask].ravel()
clf.fit(training_data, training_labels)
return clf
def predict_segmenter(features, clf):
"""Segmentation of images using a pretrained classifier.
Parameters
----------
features : ndarray
Array of features, with the last dimension corresponding to the number
of features, and the other dimensions are compatible with the shape of
the image to segment, or a flattened image.
clf : classifier object
trained classifier object, exposing a ``predict`` method as in
scikit-learn's API, for example an instance of
``RandomForestClassifier`` or ``LogisticRegression`` classifier. The
classifier must be already trained, for example with
:func:`skimage.future.fit_segmenter`.
Returns
-------
output : ndarray
Labeled array, built from the prediction of the classifier.
"""
sh = features.shape
if features.ndim > 2:
features = features.reshape((-1, sh[-1]))
try:
predicted_labels = clf.predict(features)
except NotFittedError:
raise NotFittedError(
"You must train the classifier `clf` first"
"for example with the `fit_segmenter` function."
)
except ValueError as err:
if err.args and 'x must consist of vectors of length' in err.args[0]:
raise ValueError(
err.args[0]
+ '\n'
+ "Maybe you did not use the same type of features for training the classifier."
)
else:
raise err
output = predicted_labels.reshape(sh[:-1])
return output
| TrainableSegmenter |
python | encode__django-rest-framework | rest_framework/test.py | {
"start": 10012,
"end": 13375
} | class ____(APIRequestFactory, DjangoClient):
def __init__(self, enforce_csrf_checks=False, **defaults):
super().__init__(**defaults)
self.handler = ForceAuthClientHandler(enforce_csrf_checks)
self._credentials = {}
def credentials(self, **kwargs):
"""
Sets headers that will be used on every outgoing request.
"""
self._credentials = kwargs
def force_authenticate(self, user=None, token=None):
"""
Forcibly authenticates outgoing requests with the given
user and/or token.
"""
self.handler._force_user = user
self.handler._force_token = token
if user is None and token is None:
self.logout() # Also clear any possible session info if required
def request(self, **kwargs):
# Ensure that any credentials set get added to every request.
kwargs.update(self._credentials)
return super().request(**kwargs)
def get(self, path, data=None, follow=False, **extra):
response = super().get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def post(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super().post(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, data=data, format=format, content_type=content_type, **extra)
return response
def put(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super().put(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, data=data, format=format, content_type=content_type, **extra)
return response
def patch(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super().patch(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, data=data, format=format, content_type=content_type, **extra)
return response
def delete(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super().delete(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, data=data, format=format, content_type=content_type, **extra)
return response
def options(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super().options(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, data=data, format=format, content_type=content_type, **extra)
return response
def logout(self):
self._credentials = {}
# Also clear any `force_authenticate`
self.handler._force_user = None
self.handler._force_token = None
if self.session:
super().logout()
| APIClient |
python | jina-ai__jina | tests/unit/serve/runtimes/gateway/graph/test_topology_graph.py | {
"start": 21826,
"end": 42530
} | class ____:
def __init__(
self,
graph_representation,
conditions={},
deployments_metadata={},
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.connection_pool = DummyMockConnectionPool(*args, **kwargs)
self.graph = TopologyGraph(
graph_representation,
graph_conditions=conditions,
deployments_metadata=deployments_metadata,
)
async def receive_from_client(self, client_id, msg: 'DataRequest'):
graph = copy.deepcopy(self.graph)
# important that the gateway needs to have an instance of the graph per request
tasks_to_respond = []
tasks_to_ignore = []
for origin_node in graph.origin_nodes:
leaf_tasks = origin_node.get_leaf_req_response_tasks(
self.connection_pool, msg, None
)
# Every origin node returns a set of tasks that are the ones corresponding to the leafs of each of their subtrees that unwrap all the previous tasks.
# It starts like a chain of waiting for tasks from previous nodes
tasks_to_respond.extend([task for ret, task in leaf_tasks if ret])
tasks_to_ignore.extend([task for ret, task in leaf_tasks if not ret])
resp = await asyncio.gather(*tasks_to_respond)
response, _ = zip(*resp)
return client_id, response
def create_req_from_text(text: str):
req = DataRequest()
da = DocumentArray()
da.append(Document(text=text, tags={'key': 4}))
req.data.docs = da
return req
@pytest.mark.asyncio
async def test_message_ordering_linear_graph(linear_graph_dict):
runtime = DummyMockGatewayRuntime(linear_graph_dict)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
for client_id, client_resps in resps:
assert len(client_resps) == 1
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-deployment2-client{client_id}-deployment3'
== client_resps[0].docs[0].text
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
'conditions, node_skipped',
[
({}, ''),
(
{
'deployment1': {'tags__key': {'$eq': 5}},
'deployment2': {'tags__key': {'$eq': 4}},
},
'deployment1',
),
(
{
'deployment1': {'tags__key': {'$eq': 4}},
'deployment2': {'tags__key': {'$eq': 5}},
},
'deployment2',
),
],
)
async def test_message_ordering_bifurcation_graph(
bifurcation_graph_dict, conditions, node_skipped
):
runtime = DummyMockGatewayRuntime(bifurcation_graph_dict, conditions)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
await asyncio.sleep(0.1) # need to terminate the floating deployments tasks
for client_id, client_resps in resps:
assert len(client_resps) == 2
def sorting_key(msg):
if len(msg.docs) > 0:
return msg.docs[0].text
else:
return '-1'
sorted_clients_resps = list(sorted(client_resps, key=sorting_key))
if node_skipped != 'deployment2':
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3'
== sorted_clients_resps[0].docs[0].text
)
else:
assert len(sorted_clients_resps[0].docs) == 0
assert (
f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5'
== sorted_clients_resps[1].docs[0].text
)
# assert the floating deployment was sent message
if node_skipped != 'deployment1':
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1'
== runtime.connection_pool.responded_messages[f'client{client_id}'][
'deployment1'
]
)
assert (
f'client{client_id}-Request-client{client_id}-deployment0'
== runtime.connection_pool.sent_msg[f'client{client_id}']['deployment1']
)
else:
assert (
'deployment1'
not in runtime.connection_pool.sent_msg[f'client{client_id}']
)
assert (
f'client{client_id}-Request-client{client_id}-deployment6'
== runtime.connection_pool.responded_messages[f'client{client_id}'][
'deployment6'
]
)
assert (
f'client{client_id}-Request'
== runtime.connection_pool.sent_msg[f'client{client_id}']['deployment6']
)
@pytest.mark.asyncio
async def test_message_ordering_merge_in_gateway_graph(
merge_graph_dict_directly_merge_in_gateway,
):
runtime = DummyMockGatewayRuntime(merge_graph_dict_directly_merge_in_gateway)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
for client_id, client_resps in resps:
assert len(client_resps) == 2
assert (
None in client_resps
) # at the merge branch, only responds to the last part
filtered_client_resps = [resp for resp in client_resps if resp is not None]
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-merger'
in list(map(lambda resp: resp.data.docs[0].text, filtered_client_resps))
)
deployment1_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger'
in list(map(lambda resp: resp.data.docs[0].text, filtered_client_resps))
)
# TODO: need to add logic to merge messages
assert deployment1_path or deployment2_path
@pytest.mark.asyncio
async def test_message_ordering_merge_in_last_deployment_graph(
merge_graph_dict_directly_merge_in_last_deployment,
):
runtime = DummyMockGatewayRuntime(
merge_graph_dict_directly_merge_in_last_deployment
)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
for client_id, client_resps in resps:
assert len(client_resps) == 2
assert (
None in client_resps
) # at the merge branch, only responds to the last part
filtered_client_resps = [resp for resp in client_resps if resp is not None]
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-merger-client{client_id}-deployment_last'
in list(map(lambda resp: resp.data.docs[0].text, filtered_client_resps))
)
deployment1_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger-client{client_id}-deployment_last'
in list(map(lambda resp: resp.data.docs[0].text, filtered_client_resps))
)
# TODO: need to add logic to merge messages
assert deployment1_path or deployment2_path
@pytest.mark.asyncio
async def test_message_ordering_complete_graph(complete_graph_dict):
runtime = DummyMockGatewayRuntime(complete_graph_dict)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
await asyncio.sleep(0.1) # need to terminate the floating deployments tasks
for client_id, client_resps in resps:
assert len(client_resps) == 3
assert (
None in client_resps
) # at the merge branch, only responds to the last part
filtered_client_resps = [resp for resp in client_resps if resp is not None]
assert len(filtered_client_resps) == 2
sorted_filtered_client_resps = list(
sorted(filtered_client_resps, key=lambda msg: msg.docs[0].text)
)
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1'
== sorted_filtered_client_resps[0].docs[0].text
)
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3-client{client_id}-merger-client{client_id}-deployment_last'
== sorted_filtered_client_resps[1].docs[0].text
)
deployment4_path = (
f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5-client{client_id}-merger-client{client_id}-deployment_last'
== sorted_filtered_client_resps[1].docs[0].text
)
assert deployment2_path or deployment4_path
# assert the floating deployment was sent message
assert (
f'client{client_id}-Request-client{client_id}-deployment6'
== runtime.connection_pool.responded_messages[f'client{client_id}'][
'deployment6'
]
)
assert (
f'client{client_id}-Request'
== runtime.connection_pool.sent_msg[f'client{client_id}']['deployment6']
)
@pytest.mark.asyncio
async def test_message_ordering_hanging_after_merge_graph(
graph_hanging_deployment_after_merge,
):
runtime = DummyMockGatewayRuntime(graph_hanging_deployment_after_merge)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
await asyncio.sleep(0.1) # need to terminate the floating deployments tasks
for client_id, client_resps in resps:
assert len(client_resps) == 2
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3'
== client_resps[0].docs[0].text
)
assert (
f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5'
== client_resps[1].docs[0].text
)
# assert the floating deployment was sent message
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1'
== runtime.connection_pool.responded_messages[f'client{client_id}'][
'deployment1'
]
)
assert (
f'client{client_id}-Request-client{client_id}-deployment0'
== runtime.connection_pool.sent_msg[f'client{client_id}']['deployment1']
)
path6 = (
f'client{client_id}-Request-client{client_id}-deployment6-client{client_id}-deployment7-client{client_id}-deployment9'
== runtime.connection_pool.responded_messages[f'client{client_id}'][
'deployment9'
]
)
path8 = (
f'client{client_id}-Request-client{client_id}-deployment8-client{client_id}-deployment7-client{client_id}-deployment9'
== runtime.connection_pool.responded_messages[f'client{client_id}'][
'deployment9'
]
)
assert path6 or path8
if path6:
assert (
f'client{client_id}-Request-client{client_id}-deployment6-client{client_id}-deployment7'
== runtime.connection_pool.sent_msg[f'client{client_id}']['deployment9']
)
if path8:
assert (
f'client{client_id}-Request-client{client_id}-deployment8-client{client_id}-deployment7'
== runtime.connection_pool.sent_msg[f'client{client_id}']['deployment9']
)
@pytest.mark.asyncio
async def test_message_ordering_two_joins_graph(
two_joins_graph,
):
runtime = DummyMockGatewayRuntime(two_joins_graph)
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
await asyncio.sleep(0.1) # need to terminate the floating deployments tasks
for client_id, client_resps in resps:
assert len(client_resps) == 4
filtered_client_resps = [resp for resp in client_resps if resp is not None]
assert len(filtered_client_resps) == 1
path12 = (
f'client{client_id}-Request-client{client_id}-p1-client{client_id}-joiner_1-client{client_id}-p2-client{client_id}-p4'
== filtered_client_resps[0].docs[0].text
)
path13 = (
f'client{client_id}-Request-client{client_id}-p1-client{client_id}-joiner_1-client{client_id}-p3-client{client_id}-p4'
== filtered_client_resps[0].docs[0].text
)
path02 = (
f'client{client_id}-Request-client{client_id}-p0-client{client_id}-joiner_1-client{client_id}-p2-client{client_id}-p4'
== filtered_client_resps[0].docs[0].text
)
path03 = (
f'client{client_id}-Request-client{client_id}-p0-client{client_id}-joiner_1-client{client_id}-p3-client{client_id}-p4'
== filtered_client_resps[0].docs[0].text
)
assert path02 or path03 or path12 or path13
@pytest.mark.asyncio
@pytest.mark.parametrize(
'deployments_metadata',
[
({}),
(
{
'deployment1': {'key1': 'value1'},
'deployment2': {'key2': 'value2'},
}
),
],
)
async def test_deployment_metadata_in_graph(linear_graph_dict, deployments_metadata):
runtime = DummyMockGatewayRuntime(
linear_graph_dict, deployments_metadata=deployments_metadata
)
for node in runtime.graph.origin_nodes:
if node.name in deployments_metadata:
assert node._metadata == deployments_metadata[node.name]
resps = await asyncio.gather(
runtime.receive_from_client(0, create_req_from_text('client0-Request')),
runtime.receive_from_client(1, create_req_from_text('client1-Request')),
runtime.receive_from_client(2, create_req_from_text('client2-Request')),
runtime.receive_from_client(3, create_req_from_text('client3-Request')),
runtime.receive_from_client(4, create_req_from_text('client4-Request')),
runtime.receive_from_client(5, create_req_from_text('client5-Request')),
runtime.receive_from_client(6, create_req_from_text('client6-Request')),
runtime.receive_from_client(7, create_req_from_text('client7-Request')),
runtime.receive_from_client(8, create_req_from_text('client8-Request')),
runtime.receive_from_client(9, create_req_from_text('client9-Request')),
)
assert len(resps) == 10
for client_id, client_resps in resps:
assert len(client_resps) == 1
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-deployment2-client{client_id}-deployment3'
== client_resps[0].docs[0].text
)
def test_empty_graph():
graph = TopologyGraph({})
assert not graph.origin_nodes
| DummyMockGatewayRuntime |
python | sympy__sympy | sympy/utilities/_compilation/util.py | {
"start": 478,
"end": 537
} | class ____(FileNotFoundError):
pass
| CompilerNotFoundError |
python | huggingface__transformers | src/transformers/models/camembert/modular_camembert.py | {
"start": 1624,
"end": 1671
} | class ____(RobertaModel):
pass
| CamembertModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams5.py | {
"start": 305,
"end": 338
} | class ____[R: dummy]:
...
| ClassC |
python | pypa__setuptools | setuptools/_vendor/importlib_metadata/__init__.py | {
"start": 6474,
"end": 8053
} | class ____(tuple):
"""
An immutable collection of selectable EntryPoint objects.
"""
__slots__ = ()
def __getitem__(self, name: str) -> EntryPoint: # type: ignore[override]
"""
Get the EntryPoint in self matching name.
"""
try:
return next(iter(self.select(name=name)))
except StopIteration:
raise KeyError(name)
def __repr__(self):
"""
Repr with classname and tuple constructor to
signal that we deviate from regular tuple behavior.
"""
return '%s(%r)' % (self.__class__.__name__, tuple(self))
def select(self, **params) -> EntryPoints:
"""
Select entry points from self that match the
given parameters (typically group and/or name).
"""
return EntryPoints(ep for ep in self if py39.ep_matches(ep, **params))
@property
def names(self) -> Set[str]:
"""
Return the set of all names of all entry points.
"""
return {ep.name for ep in self}
@property
def groups(self) -> Set[str]:
"""
Return the set of all groups of all entry points.
"""
return {ep.group for ep in self}
@classmethod
def _from_text_for(cls, text, dist):
return cls(ep._for(dist) for ep in cls._from_text(text))
@staticmethod
def _from_text(text):
return (
EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
for item in Sectioned.section_pairs(text or '')
)
| EntryPoints |
python | huggingface__transformers | src/transformers/models/glm4_moe/modular_glm4_moe.py | {
"start": 1316,
"end": 10393
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Glm4MoeModel`]. It is used to instantiate a
Glm4Moe model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of [THUDM/GLM-4-100B-A10B](https://huggingface.co/THUDM/GLM-4-100B-A10B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151552):
Vocabulary size of the Glm4Moe model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Glm4MoeModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 10944):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 46):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 96):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
moe_intermediate_size (`int`, *optional*, defaults to 1408):
Intermediate size of the routed expert.
num_experts_per_tok (`int`, *optional*, defaults to 8):
number of experts per token.
n_shared_experts (`int`, *optional*, defaults to 1):
Number of shared experts.
n_routed_experts (`int`, *optional*, defaults to 128):
Number of routed experts.
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
Scaling factor or routed experts.
n_group (`int`, *optional*, defaults to 1):
Number of groups for routed experts.
topk_group (`int`, *optional*, defaults to 1):
Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
first_k_dense_replace (`int`, *optional*, defaults to 1):
Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
\--k dense layers--/
norm_topk_prob (`bool`, *optional*, defaults to `True`):
Whether to normalize the topk probabilities.
use_qk_norm (`bool`, *optional*, defaults to `False`):
Whether to use query-key normalization in the attention
```python
>>> from transformers import Glm4MoeModel, Glm4MoeConfig
>>> # Initializing a Glm4Moe style configuration
>>> configuration = Glm4MoeConfig()
>>> # Initializing a model from the GLM-4-MOE-100B-A10B style configuration
>>> model = Glm4MoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glm4_moe"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Glm4Moe`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "local_rowwise",
"layers.*.mlp.experts.down_proj": "local_rowwise",
"layers.*.mlp.experts": "gather",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
attribute_map = {
"num_local_experts": "n_routed_experts",
}
def __init__(
self,
vocab_size: Optional[int] = 151552,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 10944,
num_hidden_layers: Optional[int] = 46,
num_attention_heads: Optional[int] = 96,
num_key_value_heads: Optional[int] = 8,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 131072,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
moe_intermediate_size: Optional[int] = 1408,
num_experts_per_tok: Optional[int] = 8,
n_shared_experts: Optional[int] = 1,
n_routed_experts: Optional[int] = 128,
routed_scaling_factor: Optional[float] = 1.0,
n_group: Optional[int] = 1,
topk_group: Optional[int] = 1,
first_k_dense_replace: Optional[int] = 1,
norm_topk_prob: Optional[bool] = True,
use_qk_norm: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC
# MoE arguments
self.moe_intermediate_size = moe_intermediate_size
self.num_experts_per_tok = num_experts_per_tok
self.n_group = n_group
self.topk_group = topk_group
self.n_shared_experts = n_shared_experts
self.n_routed_experts = n_routed_experts
self.routed_scaling_factor = routed_scaling_factor
self.first_k_dense_replace = first_k_dense_replace
self.norm_topk_prob = norm_topk_prob
self.use_qk_norm = use_qk_norm
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
| Glm4MoeConfig |
python | huggingface__transformers | src/transformers/models/resnet/modeling_resnet.py | {
"start": 13669,
"end": 16205
} | class ____(ResNetPreTrainedModel, BackboneMixin):
has_attentions = False
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embedding_size] + config.hidden_sizes
self.embedder = ResNetEmbeddings(config)
self.encoder = ResNetEncoder(config)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
>>> model = AutoBackbone.from_pretrained(
... "microsoft/resnet-50", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 2048, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = ["ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone"]
| ResNetBackbone |
python | redis__redis-py | redis/multidb/healthcheck.py | {
"start": 2749,
"end": 4124
} | class ____(AbstractHealthCheckPolicy):
"""
Policy that returns True if a majority of health check probes are successful.
"""
def __init__(self, health_check_probes: int, health_check_delay: float):
super().__init__(health_check_probes, health_check_delay)
def execute(self, health_checks: List[HealthCheck], database) -> bool:
for health_check in health_checks:
if self.health_check_probes % 2 == 0:
allowed_unsuccessful_probes = self.health_check_probes / 2
else:
allowed_unsuccessful_probes = (self.health_check_probes + 1) / 2
for attempt in range(self.health_check_probes):
try:
if not health_check.check_health(database):
allowed_unsuccessful_probes -= 1
if allowed_unsuccessful_probes <= 0:
return False
except Exception as e:
allowed_unsuccessful_probes -= 1
if allowed_unsuccessful_probes <= 0:
raise UnhealthyDatabaseException(
"Unhealthy database", database, e
)
if attempt < self.health_check_probes - 1:
sleep(self._health_check_delay)
return True
| HealthyMajorityPolicy |
python | getsentry__sentry | tests/sentry/preprod/size_analysis/test_compare.py | {
"start": 27524,
"end": 33503
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.organization)
def _create_treemap_element(self, name, size, path=None, children=None):
"""Helper to create TreemapElement."""
return TreemapElement(
name=name,
size=size,
path=path,
is_dir=children is not None,
children=children or [],
)
def _create_size_analysis_results(
self, download_size=500, install_size=1000, treemap_root=None, analysis_version=None
):
"""Helper to create SizeAnalysisResults."""
treemap = None
if treemap_root:
treemap = TreemapResults(
root=treemap_root,
file_count=1,
category_breakdown={},
platform="test",
)
return SizeAnalysisResults(
analysis_duration=1.0,
download_size=download_size,
install_size=install_size,
treemap=treemap,
analysis_version=analysis_version,
)
def test_compare_skips_diff_items_on_major_version_mismatch(self):
"""Integration test: diff items should be skipped when major versions differ."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=2000,
max_download_size=1000,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Create treemaps with differences
head_treemap = self._create_treemap_element("file.txt", 150)
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(
treemap_root=head_treemap, analysis_version="2.0.0"
)
base_results = self._create_size_analysis_results(
treemap_root=base_treemap, analysis_version="1.0.0"
)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
# Diff items should be empty due to version mismatch
assert result.diff_items == []
assert result.skipped_diff_item_comparison is True
assert result.head_analysis_version == "2.0.0"
assert result.base_analysis_version == "1.0.0"
# Size metrics should still be populated
assert result.size_metric_diff_item.head_install_size == 2000
assert result.size_metric_diff_item.base_install_size == 1500
def test_compare_skips_diff_items_on_minor_version_mismatch(self):
"""Integration test: diff items should be skipped when minor versions differ."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=2000,
max_download_size=1000,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
head_treemap = self._create_treemap_element("file.txt", 150)
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(
treemap_root=head_treemap, analysis_version="1.2.0"
)
base_results = self._create_size_analysis_results(
treemap_root=base_treemap, analysis_version="1.1.0"
)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert result.diff_items == []
assert result.skipped_diff_item_comparison is True
assert result.head_analysis_version == "1.2.0"
assert result.base_analysis_version == "1.1.0"
def test_compare_includes_diff_items_on_patch_version_mismatch(self):
"""Integration test: diff items should be included when only patch versions differ."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=2000,
max_download_size=1000,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
head_treemap = self._create_treemap_element("file.txt", 150)
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(
treemap_root=head_treemap, analysis_version="1.0.2"
)
base_results = self._create_size_analysis_results(
treemap_root=base_treemap, analysis_version="1.0.1"
)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
# Diff items should be present since only patch version differs
assert len(result.diff_items) == 1
assert result.skipped_diff_item_comparison is False
assert result.diff_items[0].size_diff == 50
assert result.diff_items[0].type == DiffType.INCREASED
| CompareWithVersionSkippingTest |
python | bokeh__bokeh | release/enums.py | {
"start": 611,
"end": 691
} | class ____(Enum):
PASS = "PASS"
FAIL = "FAIL"
SKIP = "SKIP"
| StepStatus |
python | walkccc__LeetCode | solutions/1120. Maximum Average Subtree/1120.py | {
"start": 60,
"end": 114
} | class ____:
summ: int
count: int
maxAverage: int
| T |
python | falconry__falcon | tests/test_httperror.py | {
"start": 5218,
"end": 5365
} | class ____:
def on_get(self, req, resp):
raise falcon.HTTPLengthRequired(title='title', description='description')
| LengthRequiredResource |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 5993,
"end": 8693
} | class ____(CythonVariable):
def __init__(self,
module,
name,
cname,
pf_cname,
qualified_name,
lineno,
type=CObject,
is_initmodule_function="False"):
super().__init__(name,
cname,
qualified_name,
type,
lineno)
self.module = module
self.pf_cname = pf_cname
self.is_initmodule_function = is_initmodule_function == "True"
self.locals = {}
self.arguments = []
self.step_into_functions = set()
# General purpose classes
frame_repr_whitelist = {
"Frame.is_valid",
"Frame.name",
"Frame.architecture",
"Frame.type",
"Frame.pc",
"Frame.block",
"Frame.function",
"Frame.older",
"Frame.newer",
"Frame.find_sal",
"Frame.select",
"Frame.static_link",
"Frame.level",
"Frame.language",
"Symbol.is_valid",
"Symbol.value",
"Symtab_and_line.is_valid",
"Symtab.is_valid",
"Symtab.fullname",
"Symtab.global_block",
"Symtab.static_block",
"Symtab.linetable",
}
def frame_repr(frame):
"""Returns a string representing the internal state of a provided GDB frame
https://sourceware.org/gdb/current/onlinedocs/gdb.html/Frames-In-Python.html
Created to serve as GDB.Frame.__repr__ for debugging purposes. GDB has many
layers of abstraction separating the state of the debugger from the
corresponding source code. This prints a tree of instance properties,
expanding the values for Symtab_and_line, Symbol, and Symtab.
Most of these properties require computation to determine, meaning much of
relevant info is behind a monad, a subset of which are evaluated.
Arguments
frame The GDB.Frame instance to be represented as a string
"""
res = f"{frame}\n"
for attribute in sorted(dir(frame)):
if attribute.startswith("__"):
continue
value = getattr(frame, attribute)
if callable(value) and value.__qualname__ in frame_repr_whitelist:
value = value()
if type(value) in [gdb.Symtab_and_line, gdb.Symbol, gdb.Symtab]:
# strip last line since it will get added on at the end of the loop
value = frame_repr(value).rstrip("\n").replace("\n", "\n\t")
res += f"{attribute}: " + (
f"{value:x}\n" if isinstance(value, int) and attribute != "line"
else f"{value}\n")
return res
| CythonFunction |
python | django__django | tests/template_tests/filter_tests/test_iriencode.py | {
"start": 1327,
"end": 1631
} | class ____(SimpleTestCase):
def test_unicode(self):
self.assertEqual(iriencode("S\xf8r-Tr\xf8ndelag"), "S%C3%B8r-Tr%C3%B8ndelag")
def test_urlencoded(self):
self.assertEqual(
iriencode(urlencode("fran\xe7ois & jill")), "fran%C3%A7ois%20%26%20jill"
)
| FunctionTests |
python | huggingface__transformers | src/transformers/models/glpn/image_processing_glpn_fast.py | {
"start": 1161,
"end": 5411
} | class ____(BaseImageProcessorFast):
do_resize = True
do_rescale = True
rescale_factor = 1 / 255
resample = PILImageResampling.BILINEAR
size_divisor = 32
valid_kwargs = GLPNImageProcessorKwargs
def _validate_preprocess_kwargs(self, **kwargs):
# pop `do_resize` to not raise an error as `size` is not None
kwargs.pop("do_resize", None)
return super()._validate_preprocess_kwargs(**kwargs)
def resize(
self,
image: "torch.Tensor",
size_divisor: int,
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
antialias (`bool`, *optional*, defaults to `True`):
Whether to use antialiasing.
Returns:
`torch.Tensor`: The resized image.
"""
height, width = image.shape[-2:]
# Rounds the height and width down to the closest multiple of size_divisor
new_h = height // size_divisor * size_divisor
new_w = width // size_divisor * size_divisor
return super().resize(
image, SizeDict(height=new_h, width=new_w), interpolation=interpolation, antialias=antialias
)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size_divisor: Optional[int] = None,
interpolation: Optional["F.InterpolationMode"] = None,
do_rescale: bool = True,
rescale_factor: Optional[float] = 1 / 255,
do_normalize: bool = False,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
disable_grouping: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
resample: Optional[PILImageResampling] = None,
**kwargs,
) -> BatchFeature:
grouped_images, grouped_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_groups = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(stacked_images, size_divisor=size_divisor, interpolation=interpolation)
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_groups[shape] = stacked_images
processed_images = reorder_images(processed_groups, grouped_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_depth_estimation(self, outputs, target_sizes=None):
"""
Convert raw model outputs to final depth predictions.
Mirrors slow GLPN: PyTorch interpolate w/ bicubic, align_corners=False.
"""
requires_backends(self, "torch")
predicted_depth = outputs.predicted_depth
results = []
target_sizes = target_sizes or [None] * predicted_depth.shape[0]
for depth, target_size in zip(predicted_depth, target_sizes):
if target_size is not None:
# Add batch and channel dimensions for interpolation
depth_4d = depth[None, None, ...]
resized = torch.nn.functional.interpolate(
depth_4d, size=target_size, mode="bicubic", align_corners=False
)
depth = resized.squeeze(0).squeeze(0)
results.append({"predicted_depth": depth})
return results
__all__ = ["GLPNImageProcessorFast"]
| GLPNImageProcessorFast |
python | tornadoweb__tornado | tornado/platform/caresresolver.py | {
"start": 306,
"end": 3500
} | class ____(Resolver):
"""Name resolver based on the c-ares library.
This is a non-blocking and non-threaded resolver. It may not produce the
same results as the system resolver, but can be used for non-blocking
resolution when threads cannot be used.
``pycares`` will not return a mix of ``AF_INET`` and ``AF_INET6`` when
``family`` is ``AF_UNSPEC``, so it is only recommended for use in
``AF_INET`` (i.e. IPv4). This is the default for
``tornado.simple_httpclient``, but other libraries may default to
``AF_UNSPEC``.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 6.2
This class is deprecated and will be removed in Tornado 7.0. Use the default
thread-based resolver instead.
"""
def initialize(self) -> None:
self.io_loop = IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {} # type: Dict[int, int]
def _sock_state_cb(self, fd: int, readable: bool, writable: bool) -> None:
state = (IOLoop.READ if readable else 0) | (IOLoop.WRITE if writable else 0)
if not state:
self.io_loop.remove_handler(fd)
del self.fds[fd]
elif fd in self.fds:
self.io_loop.update_handler(fd, state)
self.fds[fd] = state
else:
self.io_loop.add_handler(fd, self._handle_events, state)
self.fds[fd] = state
def _handle_events(self, fd: int, events: int) -> None:
read_fd = pycares.ARES_SOCKET_BAD
write_fd = pycares.ARES_SOCKET_BAD
if events & IOLoop.READ:
read_fd = fd
if events & IOLoop.WRITE:
write_fd = fd
self.channel.process_fd(read_fd, write_fd)
@gen.coroutine
def resolve(
self, host: str, port: int, family: int = 0
) -> "Generator[Any, Any, List[Tuple[int, Any]]]":
if is_valid_ip(host):
addresses = [host]
else:
# gethostbyname doesn't take callback as a kwarg
fut = Future() # type: Future[Tuple[Any, Any]]
self.channel.gethostbyname(
host, family, lambda result, error: fut.set_result((result, error))
)
result, error = yield fut
if error:
raise OSError(
"C-Ares returned error %s: %s while resolving %s"
% (error, pycares.errno.strerror(error), host)
)
addresses = result.addresses
addrinfo = []
for address in addresses:
if "." in address:
address_family = socket.AF_INET
elif ":" in address:
address_family = socket.AF_INET6
else:
address_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != address_family:
raise OSError(
"Requested socket family %d but got %d" % (family, address_family)
)
addrinfo.append((typing.cast(int, address_family), (address, port)))
return addrinfo
| CaresResolver |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-sheets/unit_tests/integration/test_auth_credentials.py | {
"start": 1078,
"end": 5153
} | class ____(GoogleSheetsBaseTest):
def test_given_authentication_error_when_check_then_status_is_failed(self) -> None:
del self._config["credentials"]["client_secret"]
output = self._check(self._config, expecting_exception=False)
msg = AirbyteConnectionStatus(status=Status.FAILED, message="Config validation error: 'Service' was expected")
expected_message = AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=msg)
assert output._messages[-1] == expected_message
@pytest.mark.skip("Need service credentials to test this behavior")
def test_given_service_authentication_error_when_check_then_status_is_failed(self) -> None:
# todo, test this with service credentials
wrong_service_account_info = deepcopy(service_account_info)
del wrong_service_account_info["client_email"]
wrong_service_account_info_encoded = json.dumps(service_account_info) # .encode("utf-8")
wrong_service_account_credentials = {
"auth_type": "Service",
"service_account_info": wrong_service_account_info_encoded,
}
wrong_config = {"spreadsheet_id": _SPREADSHEET_ID, "credentials": wrong_service_account_credentials}
# connection_status = self._source.check(Mock(), wrong_service_account_credentials)
output = self._check(wrong_config, expecting_exception=True)
msg = AirbyteConnectionStatus(status=Status.FAILED, message="")
expected_message = AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=msg)
assert output._messages[-1] == expected_message
@HttpMocker()
def test_invalid_credentials_error_message_when_check(self, http_mocker: HttpMocker) -> None:
http_mocker.post(
AuthBuilder.get_token_endpoint().with_body(AUTH_BODY).build(),
HttpResponse(json.dumps(find_template("auth_invalid_client", __file__)), 401),
)
output = self._check(self._config, expecting_exception=True)
trace_message = AirbyteTraceMessage(
type=TraceType.ERROR,
emitted_at=ANY,
error=AirbyteErrorTraceMessage(
message="Something went wrong in the connector. See the logs for more details.",
internal_message="401 Client Error: None for url: https://www.googleapis.com/oauth2/v4/token",
failure_type=FailureType.system_error,
stack_trace=ANY,
),
)
expected_message = AirbyteMessage(type=Type.TRACE, trace=trace_message)
assert output.errors[-1] == expected_message
def test_check_invalid_creds_json_file(self) -> None:
invalid_creds_json_file = {}
output = self._check(invalid_creds_json_file, expecting_exception=True)
msg = AirbyteConnectionStatus(status=Status.FAILED, message="Config validation error: 'spreadsheet_id' is a required property")
expected_message = AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=msg)
assert output._messages[-1] == expected_message
@HttpMocker()
def test_discover_invalid_credentials_error_message(self, http_mocker: HttpMocker) -> None:
http_mocker.post(
AuthBuilder.get_token_endpoint().with_body(AUTH_BODY).build(),
HttpResponse(json.dumps(find_template("auth_invalid_client", __file__)), 401),
)
trace_message = AirbyteTraceMessage(
type=TraceType.ERROR,
emitted_at=ANY,
error=AirbyteErrorTraceMessage(
message="Something went wrong in the connector. See the logs for more details.",
internal_message="401 Client Error: None for url: https://www.googleapis.com/oauth2/v4/token",
failure_type=FailureType.system_error,
stack_trace=ANY,
),
)
expected_message = AirbyteMessage(type=Type.TRACE, trace=trace_message)
output = self._discover(self._config, expecting_exception=True)
assert output.errors[-1] == expected_message
| TestCredentials |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 648053,
"end": 648661
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("TeamMemberEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| TeamMemberConnection |
python | scrapy__scrapy | scrapy/core/downloader/handlers/__init__.py | {
"start": 745,
"end": 894
} | class ____(Protocol):
def download_request(
self, request: Request, spider: Spider
) -> Deferred[Response]: ...
| DownloadHandlerProtocol |
python | instagram__MonkeyType | monkeytype/stubs.py | {
"start": 20555,
"end": 24455
} | class ____(TypeRewriter):
"""Replace TypedDicts in a generic type with class stubs and store all the stubs."""
def __init__(self, class_name_hint: str) -> None:
self._class_name_hint = class_name_hint
self.stubs: List[ClassStub] = []
def _rewrite_container(self, cls: type, container: type) -> type:
"""Rewrite while using the index of the inner type as a class name hint.
Otherwise, Tuple[TypedDict(...), TypedDict(...)] would give the same
name for both the generated classes."""
if container.__module__ != "typing":
return container
args = getattr(container, "__args__", None)
if args is None:
return container
elif args == ((),) or args == (): # special case of empty tuple `Tuple[()]`
elems: Tuple[Any, ...] = ()
else:
# Avoid adding a suffix for the first one so that
# single-element containers don't have a numeric suffix.
elems, stub_lists = zip(
*[
self.rewrite_and_get_stubs(
elem,
class_name_hint=self._class_name_hint
+ ("" if index == 0 else str(index + 1)),
)
for index, elem in enumerate(args)
]
)
for stubs in stub_lists:
self.stubs.extend(stubs)
# Value of type "type" is not indexable.
return cls[elems] # type: ignore[no-any-return,index]
def _add_typed_dict_class_stub(
self,
fields: Dict[str, type],
class_name: str,
base_class_name: str = "TypedDict",
total: bool = True,
) -> None:
attribute_stubs = []
for name, typ in fields.items():
rewritten_type, stubs = self.rewrite_and_get_stubs(
typ, class_name_hint=name
)
attribute_stubs.append(AttributeStub(name, rewritten_type))
self.stubs.extend(stubs)
total_flag = "" if total else ", total=False"
self.stubs.append(
ClassStub(
name=f"{class_name}({base_class_name}{total_flag})",
function_stubs=[],
attribute_stubs=attribute_stubs,
)
)
def rewrite_anonymous_TypedDict(self, typed_dict: type) -> ForwardRef: # type: ignore[override]
class_name = get_typed_dict_class_name(self._class_name_hint)
required_fields, optional_fields = field_annotations(typed_dict)
has_required_fields = len(required_fields) != 0
has_optional_fields = len(optional_fields) != 0
if not has_required_fields and not has_optional_fields:
raise Exception(
"Expected empty TypedDicts to be shrunk as Dict[Any, Any]"
" but got an empty TypedDict anyway"
)
elif has_required_fields and not has_optional_fields:
self._add_typed_dict_class_stub(required_fields, class_name)
elif not has_required_fields and has_optional_fields:
self._add_typed_dict_class_stub(optional_fields, class_name, total=False)
else:
self._add_typed_dict_class_stub(required_fields, class_name)
base_class_name = class_name
class_name = get_typed_dict_class_name(self._class_name_hint) + "NonTotal"
self._add_typed_dict_class_stub(
optional_fields, class_name, base_class_name, total=False
)
return make_forward_ref(class_name)
@staticmethod
def rewrite_and_get_stubs(
typ: type, class_name_hint: str
) -> Tuple[type, List[ClassStub]]:
rewriter = ReplaceTypedDictsWithStubs(class_name_hint)
rewritten_type = rewriter.rewrite(typ)
return rewritten_type, rewriter.stubs
| ReplaceTypedDictsWithStubs |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_merge_cell.py | {
"start": 301,
"end": 809
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_merge_cell() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_merge_cell(self):
"""Test the _write_merge_cell() method"""
self.worksheet._write_merge_cell([2, 1, 2, 2])
exp = """<mergeCell ref="B3:C3"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteMergeCell |
python | imageio__imageio | imageio/plugins/npz.py | {
"start": 719,
"end": 2670
} | class ____(Format):
"""See :mod:`imageio.plugins.npz`"""
def _can_read(self, request):
# We support any kind of image data
return request.extension in self.extensions
def _can_write(self, request):
# We support any kind of image data
return request.extension in self.extensions
# -- reader
class Reader(Format.Reader):
def _open(self):
# Load npz file, which provides another file like object
self._npz = np.load(self.request.get_file())
assert isinstance(self._npz, np.lib.npyio.NpzFile)
# Get list of names, ordered by name, but smarter
self._names = sorted(self._npz.files, key=lambda x: x.split("_")[-1])
def _close(self):
self._npz.close()
def _get_length(self):
return len(self._names)
def _get_data(self, index):
# Get data
if index < 0 or index >= len(self._names):
raise IndexError("Index out of range while reading from nzp")
im = self._npz[self._names[index]]
# Return array and empty meta data
return im, {}
def _get_meta_data(self, index):
# Get the meta data for the given index
raise RuntimeError("The npz format does not support meta data.")
# -- writer
class Writer(Format.Writer):
def _open(self):
# Npz is not such a great format. We cannot stream to the file.
# So we remember all images and write them to file at the end.
self._images = []
def _close(self):
# Write everything
np.savez_compressed(self.request.get_file(), *self._images)
def _append_data(self, im, meta):
self._images.append(im) # discart meta data
def set_meta_data(self, meta):
raise RuntimeError("The npz format does not support meta data.")
| NpzFormat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.