language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/data/experimental/benchmarks/snapshot_dataset_benchmark.py | {
"start": 1037,
"end": 6665
} | class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.experimental.snapshot()`."""
def _makeSnapshotDirectory(self):
tmp_dir = test.get_temp_dir()
tmp_dir = os.path.join(tmp_dir, "snapshot")
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
return tmp_dir
def _createSimpleDataset(self,
num_elements,
tmp_dir=None,
compression=snapshot.COMPRESSION_NONE):
if not tmp_dir:
tmp_dir = self._makeSnapshotDirectory()
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(
lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))
dataset = dataset.repeat(num_elements)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmp_dir, compression=compression))
return dataset
def benchmarkWriteSnapshotGzipCompression(self):
num_elements = 500000
dataset = self._createSimpleDataset(
num_elements=num_elements, compression=snapshot.COMPRESSION_GZIP)
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
name="write_gzip",
warmup=False,
extras={
"model_name": "snapshot.benchmark.1",
"parameters": "%d" % num_elements,
},
iters=1)
def benchmarkWriteSnapshotSnappyCompression(self):
num_elements = 500000
dataset = self._createSimpleDataset(
num_elements=num_elements, compression=snapshot.COMPRESSION_SNAPPY)
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
name="write_snappy",
warmup=False,
extras={
"model_name": "snapshot.benchmark.2",
"parameters": "%d" % num_elements,
},
iters=1)
def benchmarkWriteSnapshotSimple(self):
num_elements = 500000
dataset = self._createSimpleDataset(num_elements=num_elements)
# We only run one iteration here because running multiple iterations will
# cause the later iterations to simply read from the already written
# snapshot rather than write a new one.
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
name="write_simple",
warmup=False,
extras={
"model_name": "snapshot.benchmark.3",
"parameters": "%d" % num_elements,
},
iters=1)
def benchmarkPassthroughSnapshotSimple(self):
num_elements = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elements=num_elements, tmp_dir=tmp_dir)
# Consume only 1 element, thus making sure we don't finalize.
self.run_benchmark(
dataset=dataset,
num_elements=1,
iters=1,
warmup=False,
apply_default_optimizations=True)
# Now run the actual benchmarks and report them
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
name="passthrough_simple",
extras={
"model_name": "snapshot.benchmark.4",
"parameters": "%d" % num_elements,
},
)
def benchmarkReadSnapshotSimple(self):
num_elements = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elements=num_elements, tmp_dir=tmp_dir)
# consume all the elements to let snapshot write things to disk
self.run_benchmark(
dataset=dataset,
num_elements=num_elements,
iters=1,
warmup=False,
apply_default_optimizations=True)
# Now run the actual benchmarks and report them
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
name="read_simple",
extras={
"model_name": "snapshot.benchmark.5",
"parameters": "%d" % num_elements,
})
def benchmarkReadSnapshotGzipCompression(self):
num_elements = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elements=num_elements,
tmp_dir=tmp_dir,
compression=snapshot.COMPRESSION_GZIP)
# consume all the elements to let snapshot write things to disk
self.run_benchmark(
dataset=dataset,
num_elements=num_elements,
iters=1,
warmup=False,
apply_default_optimizations=True)
# Now run the actual benchmarks and report them
self.run_and_report_benchmark(
dataset=dataset, num_elements=num_elements, name="read_gzip",
extras={
"model_name": "snapshot.benchmark.6",
"parameters": "%d" % num_elements,
})
def benchmarkReadSnapshotSnappyCompression(self):
num_elements = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elements=num_elements,
tmp_dir=tmp_dir,
compression=snapshot.COMPRESSION_SNAPPY)
# consume all the elements to let snapshot write things to disk
self.run_benchmark(
dataset=dataset,
num_elements=num_elements,
iters=1,
warmup=False,
apply_default_optimizations=True)
# Now run the actual benchmarks and report them
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
name="read_snappy",
extras={
"model_name": "snapshot.benchmark.7",
"parameters": "%d" % num_elements,
})
if __name__ == "__main__":
benchmark_base.test.main()
| SnapshotDatasetBenchmark |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 28195,
"end": 29210
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a block document."""
name: Optional[BlockDocumentName] = Field(
default=None,
description=(
"The block document's name. Not required for anonymous block documents."
),
)
data: Dict[str, Any] = Field(
default_factory=dict, description="The block document's data"
)
block_schema_id: UUID = Field(default=..., description="A block schema ID")
block_type_id: UUID = Field(default=..., description="A block type ID")
is_anonymous: bool = Field(
default=False,
description=(
"Whether the block is anonymous (anonymous blocks are usually created by"
" Prefect automatically)"
),
)
@model_validator(mode="before")
def validate_name_is_present_if_not_anonymous(
cls, values: dict[str, Any]
) -> dict[str, Any]:
return validate_name_present_on_nonanonymous_blocks(values)
| BlockDocumentCreate |
python | falconry__falcon | examples/recipes/msgspec_msgpack_handler.py | {
"start": 122,
"end": 529
} | class ____(media.BaseHandler):
def deserialize(
self,
stream: ReadableIO,
content_type: Optional[str],
content_length: Optional[int],
) -> object:
return msgpack.decode(stream.read())
def serialize(self, media: object, content_type: str) -> bytes:
return msgpack.encode(media)
msgpack_handler = MsgspecMessagePackHandler()
| MsgspecMessagePackHandler |
python | huggingface__transformers | src/transformers/models/seggpt/modeling_seggpt.py | {
"start": 24751,
"end": 26116
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.decoder_embed = nn.Linear(
config.hidden_size * len(config.intermediate_hidden_state_indices),
config.patch_size**2 * config.decoder_hidden_size,
bias=True,
)
self.decoder_pred = SegGptDecoderHead(config)
self.patch_size = config.patch_size
self.decoder_hidden_size = config.decoder_hidden_size
self.config = config
def _reshape_hidden_states(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
batch_size, patch_height, patch_width, _ = hidden_states.shape
hidden_states = hidden_states.reshape(
batch_size, patch_height, patch_width, self.patch_size, self.patch_size, self.decoder_hidden_size
)
hidden_states = hidden_states.permute(0, 5, 1, 3, 2, 4)
hidden_states = hidden_states.reshape(
shape=(batch_size, -1, patch_height * self.patch_size, patch_width * self.patch_size)
)
return hidden_states
def forward(self, hidden_states: torch.FloatTensor):
hidden_states = self.decoder_embed(hidden_states)
hidden_states = self._reshape_hidden_states(hidden_states)
hidden_states = self.decoder_pred(hidden_states)
return hidden_states
@auto_docstring
| SegGptDecoder |
python | scipy__scipy | scipy/stats/_stats_py.py | {
"start": 342871,
"end": 418172
} | class ____:
r"""
Result of `scipy.stats.quantile_test`.
Attributes
----------
statistic: float
The statistic used to calculate the p-value; either ``T1``, the
number of observations less than or equal to the hypothesized quantile,
or ``T2``, the number of observations strictly less than the
hypothesized quantile. Two test statistics are required to handle the
possibility the data was generated from a discrete or mixed
distribution.
statistic_type : int
``1`` or ``2`` depending on which of ``T1`` or ``T2`` was used to
calculate the p-value respectively. ``T1`` corresponds to the
``"greater"`` alternative hypothesis and ``T2`` to the ``"less"``. For
the ``"two-sided"`` case, the statistic type that leads to smallest
p-value is used. For significant tests, ``statistic_type = 1`` means
there is evidence that the population quantile is significantly greater
than the hypothesized value and ``statistic_type = 2`` means there is
evidence that it is significantly less than the hypothesized value.
pvalue : float
The p-value of the hypothesis test.
"""
statistic: float
statistic_type: int
pvalue: float
_alternative: list[str] = field(repr=False)
_x : np.ndarray = field(repr=False)
_p : float = field(repr=False)
def confidence_interval(self, confidence_level=0.95):
"""
Compute the confidence interval of the quantile.
Parameters
----------
confidence_level : float, default: 0.95
Confidence level for the computed confidence interval
of the quantile. Default is 0.95.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
Examples
--------
>>> import numpy as np
>>> import scipy.stats as stats
>>> p = 0.75 # quantile of interest
>>> q = 0 # hypothesized value of the quantile
>>> x = np.exp(np.arange(0, 1.01, 0.01))
>>> res = stats.quantile_test(x, q=q, p=p, alternative='less')
>>> lb, ub = res.confidence_interval()
>>> lb, ub
(-inf, 2.293318740264183)
>>> res = stats.quantile_test(x, q=q, p=p, alternative='two-sided')
>>> lb, ub = res.confidence_interval(0.9)
>>> lb, ub
(1.9542373206359396, 2.293318740264183)
"""
alternative = self._alternative
p = self._p
x = np.sort(self._x)
n = len(x)
bd = stats.binom(n, p)
if confidence_level <= 0 or confidence_level >= 1:
message = "`confidence_level` must be a number between 0 and 1."
raise ValueError(message)
low_index = np.nan
high_index = np.nan
if alternative == 'less':
p = 1 - confidence_level
low = -np.inf
high_index = int(bd.isf(p))
high = x[high_index] if high_index < n else np.nan
elif alternative == 'greater':
p = 1 - confidence_level
low_index = int(bd.ppf(p)) - 1
low = x[low_index] if low_index >= 0 else np.nan
high = np.inf
elif alternative == 'two-sided':
p = (1 - confidence_level) / 2
low_index = int(bd.ppf(p)) - 1
low = x[low_index] if low_index >= 0 else np.nan
high_index = int(bd.isf(p))
high = x[high_index] if high_index < n else np.nan
return ConfidenceInterval(low, high)
def quantile_test_iv(x, q, p, alternative):
x = np.atleast_1d(x)
message = '`x` must be a one-dimensional array of numbers.'
if x.ndim != 1 or not np.issubdtype(x.dtype, np.number):
raise ValueError(message)
q = np.array(q)[()]
message = "`q` must be a scalar."
if q.ndim != 0 or not np.issubdtype(q.dtype, np.number):
raise ValueError(message)
p = np.array(p)[()]
message = "`p` must be a float strictly between 0 and 1."
if p.ndim != 0 or p >= 1 or p <= 0:
raise ValueError(message)
alternatives = {'two-sided', 'less', 'greater'}
message = f"`alternative` must be one of {alternatives}"
if alternative not in alternatives:
raise ValueError(message)
return x, q, p, alternative
@xp_capabilities(np_only=True)
def quantile_test(x, *, q=0, p=0.5, alternative='two-sided'):
r"""
Perform a quantile test and compute a confidence interval of the quantile.
This function tests the null hypothesis that `q` is the value of the
quantile associated with probability `p` of the population underlying
sample `x`. For example, with default parameters, it tests that the
median of the population underlying `x` is zero. The function returns an
object including the test statistic, a p-value, and a method for computing
the confidence interval around the quantile.
Parameters
----------
x : array_like
A one-dimensional sample.
q : float, default: 0
The hypothesized value of the quantile.
p : float, default: 0.5
The probability associated with the quantile; i.e. the proportion of
the population less than `q` is `p`. Must be strictly between 0 and
1.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the quantile associated with the probability `p`
is not `q`.
* 'less': the quantile associated with the probability `p` is less
than `q`.
* 'greater': the quantile associated with the probability `p` is
greater than `q`.
Returns
-------
result : QuantileTestResult
An object with the following attributes:
statistic : float
One of two test statistics that may be used in the quantile test.
The first test statistic, ``T1``, is the proportion of samples in
`x` that are less than or equal to the hypothesized quantile
`q`. The second test statistic, ``T2``, is the proportion of
samples in `x` that are strictly less than the hypothesized
quantile `q`.
When ``alternative = 'greater'``, ``T1`` is used to calculate the
p-value and ``statistic`` is set to ``T1``.
When ``alternative = 'less'``, ``T2`` is used to calculate the
p-value and ``statistic`` is set to ``T2``.
When ``alternative = 'two-sided'``, both ``T1`` and ``T2`` are
considered, and the one that leads to the smallest p-value is used.
statistic_type : int
Either `1` or `2` depending on which of ``T1`` or ``T2`` was
used to calculate the p-value.
pvalue : float
The p-value associated with the given alternative.
The object also has the following method:
confidence_interval(confidence_level=0.95)
Computes a confidence interval around the the
population quantile associated with the probability `p`. The
confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`. Values are `nan` when there are
not enough observations to compute the confidence interval at
the desired confidence.
Notes
-----
This test and its method for computing confidence intervals are
non-parametric. They are valid if and only if the observations are i.i.d.
The implementation of the test follows Conover [1]_. Two test statistics
are considered.
``T1``: The number of observations in `x` less than or equal to `q`.
``T1 = (x <= q).sum()``
``T2``: The number of observations in `x` strictly less than `q`.
``T2 = (x < q).sum()``
The use of two test statistics is necessary to handle the possibility that
`x` was generated from a discrete or mixed distribution.
The null hypothesis for the test is:
H0: The :math:`p^{\mathrm{th}}` population quantile is `q`.
and the null distribution for each test statistic is
:math:`\mathrm{binom}\left(n, p\right)`. When ``alternative='less'``,
the alternative hypothesis is:
H1: The :math:`p^{\mathrm{th}}` population quantile is less than `q`.
and the p-value is the probability that the binomial random variable
.. math::
Y \sim \mathrm{binom}\left(n, p\right)
is greater than or equal to the observed value ``T2``.
When ``alternative='greater'``, the alternative hypothesis is:
H1: The :math:`p^{\mathrm{th}}` population quantile is greater than `q`
and the p-value is the probability that the binomial random variable Y
is less than or equal to the observed value ``T1``.
When ``alternative='two-sided'``, the alternative hypothesis is
H1: `q` is not the :math:`p^{\mathrm{th}}` population quantile.
and the p-value is twice the smaller of the p-values for the ``'less'``
and ``'greater'`` cases. Both of these p-values can exceed 0.5 for the same
data, so the value is clipped into the interval :math:`[0, 1]`.
The approach for confidence intervals is attributed to Thompson [2]_ and
later proven to be applicable to any set of i.i.d. samples [3]_. The
computation is based on the observation that the probability of a quantile
:math:`q` to be larger than any observations :math:`x_m (1\leq m \leq N)`
can be computed as
.. math::
\mathbb{P}(x_m \leq q) = 1 - \sum_{k=0}^{m-1} \binom{N}{k}
q^k(1-q)^{N-k}
By default, confidence intervals are computed for a 95% confidence level.
A common interpretation of a 95% confidence intervals is that if i.i.d.
samples are drawn repeatedly from the same population and confidence
intervals are formed each time, the confidence interval will contain the
true value of the specified quantile in approximately 95% of trials.
A similar function is available in the QuantileNPCI R package [4]_. The
foundation is the same, but it computes the confidence interval bounds by
doing interpolations between the sample values, whereas this function uses
only sample values as bounds. Thus, ``quantile_test.confidence_interval``
returns more conservative intervals (i.e., larger).
The same computation of confidence intervals for quantiles is included in
the confintr package [5]_.
Two-sided confidence intervals are not guaranteed to be optimal; i.e.,
there may exist a tighter interval that may contain the quantile of
interest with probability larger than the confidence level.
Without further assumption on the samples (e.g., the nature of the
underlying distribution), the one-sided intervals are optimally tight.
References
----------
.. [1] W. J. Conover. Practical Nonparametric Statistics, 3rd Ed. 1999.
.. [2] W. R. Thompson, "On Confidence Ranges for the Median and Other
Expectation Distributions for Populations of Unknown Distribution
Form," The Annals of Mathematical Statistics, vol. 7, no. 3,
pp. 122-128, 1936, Accessed: Sep. 18, 2019. [Online]. Available:
https://www.jstor.org/stable/2957563.
.. [3] H. A. David and H. N. Nagaraja, "Order Statistics in Nonparametric
Inference" in Order Statistics, John Wiley & Sons, Ltd, 2005, pp.
159-170. Available:
https://onlinelibrary.wiley.com/doi/10.1002/0471722162.ch7.
.. [4] N. Hutson, A. Hutson, L. Yan, "QuantileNPCI: Nonparametric
Confidence Intervals for Quantiles," R package,
https://cran.r-project.org/package=QuantileNPCI
.. [5] M. Mayer, "confintr: Confidence Intervals," R package,
https://cran.r-project.org/package=confintr
Examples
--------
Suppose we wish to test the null hypothesis that the median of a population
is equal to 0.5. We choose a confidence level of 99%; that is, we will
reject the null hypothesis in favor of the alternative if the p-value is
less than 0.01.
When testing random variates from the standard uniform distribution, which
has a median of 0.5, we expect the data to be consistent with the null
hypothesis most of the time.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(6981396440634228121)
>>> rvs = stats.uniform.rvs(size=100, random_state=rng)
>>> stats.quantile_test(rvs, q=0.5, p=0.5)
QuantileTestResult(statistic=45, statistic_type=1, pvalue=0.36820161732669576)
As expected, the p-value is not below our threshold of 0.01, so
we cannot reject the null hypothesis.
When testing data from the standard *normal* distribution, which has a
median of 0, we would expect the null hypothesis to be rejected.
>>> rvs = stats.norm.rvs(size=100, random_state=rng)
>>> stats.quantile_test(rvs, q=0.5, p=0.5)
QuantileTestResult(statistic=67, statistic_type=2, pvalue=0.0008737198369123724)
Indeed, the p-value is lower than our threshold of 0.01, so we reject the
null hypothesis in favor of the default "two-sided" alternative: the median
of the population is *not* equal to 0.5.
However, suppose we were to test the null hypothesis against the
one-sided alternative that the median of the population is *greater* than
0.5. Since the median of the standard normal is less than 0.5, we would not
expect the null hypothesis to be rejected.
>>> stats.quantile_test(rvs, q=0.5, p=0.5, alternative='greater')
QuantileTestResult(statistic=67, statistic_type=1, pvalue=0.9997956114162866)
Unsurprisingly, with a p-value greater than our threshold, we would not
reject the null hypothesis in favor of the chosen alternative.
The quantile test can be used for any quantile, not only the median. For
example, we can test whether the third quartile of the distribution
underlying the sample is greater than 0.6.
>>> rvs = stats.uniform.rvs(size=100, random_state=rng)
>>> stats.quantile_test(rvs, q=0.6, p=0.75, alternative='greater')
QuantileTestResult(statistic=64, statistic_type=1, pvalue=0.00940696592998271)
The p-value is lower than the threshold. We reject the null hypothesis in
favor of the alternative: the third quartile of the distribution underlying
our sample is greater than 0.6.
`quantile_test` can also compute confidence intervals for any quantile.
>>> rvs = stats.norm.rvs(size=100, random_state=rng)
>>> res = stats.quantile_test(rvs, q=0.6, p=0.75)
>>> ci = res.confidence_interval(confidence_level=0.95)
>>> ci
ConfidenceInterval(low=0.284491604437432, high=0.8912531024914844)
When testing a one-sided alternative, the confidence interval contains
all observations such that if passed as `q`, the p-value of the
test would be greater than 0.05, and therefore the null hypothesis
would not be rejected. For example:
>>> rvs.sort()
>>> q, p, alpha = 0.6, 0.75, 0.95
>>> res = stats.quantile_test(rvs, q=q, p=p, alternative='less')
>>> ci = res.confidence_interval(confidence_level=alpha)
>>> for x in rvs[rvs <= ci.high]:
... res = stats.quantile_test(rvs, q=x, p=p, alternative='less')
... assert res.pvalue > 1-alpha
>>> for x in rvs[rvs > ci.high]:
... res = stats.quantile_test(rvs, q=x, p=p, alternative='less')
... assert res.pvalue < 1-alpha
Also, if a 95% confidence interval is repeatedly generated for random
samples, the confidence interval will contain the true quantile value in
approximately 95% of replications.
>>> dist = stats.rayleigh() # our "unknown" distribution
>>> p = 0.2
>>> true_stat = dist.ppf(p) # the true value of the statistic
>>> n_trials = 1000
>>> quantile_ci_contains_true_stat = 0
>>> for i in range(n_trials):
... data = dist.rvs(size=100, random_state=rng)
... res = stats.quantile_test(data, p=p)
... ci = res.confidence_interval(0.95)
... if ci[0] < true_stat < ci[1]:
... quantile_ci_contains_true_stat += 1
>>> quantile_ci_contains_true_stat >= 950
True
This works with any distribution and any quantile, as long as the samples
are i.i.d.
"""
# Implementation carefully follows [1] 3.2
# "H0: the p*th quantile of X is x*"
# To facilitate comparison with [1], we'll use variable names that
# best match Conover's notation
X, x_star, p_star, H1 = quantile_test_iv(x, q, p, alternative)
# "We will use two test statistics in this test. Let T1 equal "
# "the number of observations less than or equal to x*, and "
# "let T2 equal the number of observations less than x*."
T1 = np.count_nonzero(X <= x_star)
T2 = np.count_nonzero(X < x_star)
# "The null distribution of the test statistics T1 and T2 is "
# "the binomial distribution, with parameters n = sample size, and "
# "p = p* as given in the null hypothesis.... Y has the binomial "
# "distribution with parameters n and p*."
n = len(X)
Y = stats.binom(n=n, p=p_star)
# "H1: the p* population quantile is less than x*"
if H1 == 'less':
# "The p-value is the probability that a binomial random variable Y "
# "is greater than *or equal to* the observed value of T2...using p=p*"
pvalue = Y.sf(T2-1) # Y.pmf(T2) + Y.sf(T2)
statistic = T2
statistic_type = 2
# "H1: the p* population quantile is greater than x*"
elif H1 == 'greater':
# "The p-value is the probability that a binomial random variable Y "
# "is less than or equal to the observed value of T1... using p = p*"
pvalue = Y.cdf(T1)
statistic = T1
statistic_type = 1
# "H1: x* is not the p*th population quantile"
elif H1 == 'two-sided':
# "The p-value is twice the smaller of the probabilities that a
# binomial random variable Y is less than or equal to the observed
# value of T1 or greater than or equal to the observed value of T2
# using p=p*."
# Note: both one-sided p-values can exceed 0.5 for the same data, so
# `clip`
pvalues = [Y.cdf(T1), Y.sf(T2 - 1)] # [greater, less]
sorted_idx = np.argsort(pvalues)
pvalue = np.clip(2*pvalues[sorted_idx[0]], 0, 1)
if sorted_idx[0]:
statistic, statistic_type = T2, 2
else:
statistic, statistic_type = T1, 1
return QuantileTestResult(
statistic=statistic,
statistic_type=statistic_type,
pvalue=pvalue,
_alternative=H1,
_x=X,
_p=p_star
)
#####################################
# STATISTICAL DISTANCES #
#####################################
@xp_capabilities(np_only=True)
def wasserstein_distance_nd(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the Wasserstein-1 distance between two N-D discrete distributions.
The Wasserstein distance, also called the Earth mover's distance or the
optimal transport distance, is a similarity metric between two probability
distributions [1]_. In the discrete case, the Wasserstein distance can be
understood as the cost of an optimal transport plan to convert one
distribution into the other. The cost is calculated as the product of the
amount of probability mass being moved and the distance it is being moved.
A brief and intuitive introduction can be found at [2]_.
.. versionadded:: 1.13.0
Parameters
----------
u_values : 2d array_like
A sample from a probability distribution or the support (set of all
possible values) of a probability distribution. Each element along
axis 0 is an observation or possible value, and axis 1 represents the
dimensionality of the distribution; i.e., each row is a vector
observation or possible value.
v_values : 2d array_like
A sample from or the support of a second distribution.
u_weights, v_weights : 1d array_like, optional
Weights or counts corresponding with the sample or probability masses
corresponding with the support values. Sum of elements must be positive
and finite. If unspecified, each value is assigned the same weight.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
Given two probability mass functions, :math:`u`
and :math:`v`, the first Wasserstein distance between the distributions
using the Euclidean norm is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int \| x-y \|_2 \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R}^n \times \mathbb{R}^n` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively. For a given value
:math:`x`, :math:`u(x)` gives the probability of :math:`u` at position
:math:`x`, and the same for :math:`v(x)`.
This is also called the optimal transport problem or the Monge problem.
Let the finite point sets :math:`\{x_i\}` and :math:`\{y_j\}` denote
the support set of probability mass function :math:`u` and :math:`v`
respectively. The Monge problem can be expressed as follows,
Let :math:`\Gamma` denote the transport plan, :math:`D` denote the
distance matrix and,
.. math::
x = \text{vec}(\Gamma) \\
c = \text{vec}(D) \\
b = \begin{bmatrix}
u\\
v\\
\end{bmatrix}
The :math:`\text{vec}()` function denotes the Vectorization function
that transforms a matrix into a column vector by vertically stacking
the columns of the matrix.
The transport plan :math:`\Gamma` is a matrix :math:`[\gamma_{ij}]` in
which :math:`\gamma_{ij}` is a positive value representing the amount of
probability mass transported from :math:`u(x_i)` to :math:`v(y_i)`.
Summing over the rows of :math:`\Gamma` should give the source distribution
:math:`u` : :math:`\sum_j \gamma_{ij} = u(x_i)` holds for all :math:`i`
and summing over the columns of :math:`\Gamma` should give the target
distribution :math:`v`: :math:`\sum_i \gamma_{ij} = v(y_j)` holds for all
:math:`j`.
The distance matrix :math:`D` is a matrix :math:`[d_{ij}]`, in which
:math:`d_{ij} = d(x_i, y_j)`.
Given :math:`\Gamma`, :math:`D`, :math:`b`, the Monge problem can be
transformed into a linear programming problem by
taking :math:`A x = b` as constraints and :math:`z = c^T x` as minimization
target (sum of costs) , where matrix :math:`A` has the form
.. math::
\begin{array} {rrrr|rrrr|r|rrrr}
1 & 1 & \dots & 1 & 0 & 0 & \dots & 0 & \dots & 0 & 0 & \dots &
0 \cr
0 & 0 & \dots & 0 & 1 & 1 & \dots & 1 & \dots & 0 & 0 &\dots &
0 \cr
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots
& \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr
0 & 0 & \dots & 0 & 0 & 0 & \dots & 0 & \dots & 1 & 1 & \dots &
1 \cr \hline
1 & 0 & \dots & 0 & 1 & 0 & \dots & \dots & \dots & 1 & 0 & \dots &
0 \cr
0 & 1 & \dots & 0 & 0 & 1 & \dots & \dots & \dots & 0 & 1 & \dots &
0 \cr
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots &
\vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr
0 & 0 & \dots & 1 & 0 & 0 & \dots & 1 & \dots & 0 & 0 & \dots & 1
\end{array}
By solving the dual form of the above linear programming problem (with
solution :math:`y^*`), the Wasserstein distance :math:`l_1 (u, v)` can
be computed as :math:`b^T y^*`.
The above solution is inspired by Vincent Herrmann's blog [3]_ . For a
more thorough explanation, see [4]_ .
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric",
https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Lili Weng, "What is Wasserstein distance?", Lil'log,
https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-wasserstein-distance.
.. [3] Hermann, Vincent. "Wasserstein GAN and the Kantorovich-Rubinstein
Duality". https://vincentherrmann.github.io/blog/wasserstein/.
.. [4] Peyré, Gabriel, and Marco Cuturi. "Computational optimal
transport." Center for Research in Economics and Statistics
Working Papers 2017-86 (2017).
See Also
--------
wasserstein_distance: Compute the Wasserstein-1 distance between two
1D discrete distributions.
Examples
--------
Compute the Wasserstein distance between two three-dimensional samples,
each with two observations.
>>> from scipy.stats import wasserstein_distance_nd
>>> wasserstein_distance_nd([[0, 2, 3], [1, 2, 5]], [[3, 2, 3], [4, 2, 5]])
3.0
Compute the Wasserstein distance between two two-dimensional distributions
with three and two weighted observations, respectively.
>>> wasserstein_distance_nd([[0, 2.75], [2, 209.3], [0, 0]],
... [[0.2, 0.322], [4.5, 25.1808]],
... [0.4, 5.2, 0.114], [0.8, 1.5])
174.15840245217169
"""
m, n = len(u_values), len(v_values)
u_values = asarray(u_values)
v_values = asarray(v_values)
if u_values.ndim > 2 or v_values.ndim > 2:
raise ValueError('Invalid input values. The inputs must have either '
'one or two dimensions.')
# if dimensions are not equal throw error
if u_values.ndim != v_values.ndim:
raise ValueError('Invalid input values. Dimensions of inputs must be '
'equal.')
# if data is 1D then call the cdf_distance function
if u_values.ndim == 1 and v_values.ndim == 1:
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
# if number of columns is not equal throw error
if u_values.shape[1] != v_values.shape[1]:
raise ValueError('Invalid input values. If two-dimensional, '
'`u_values` and `v_values` must have the same '
'number of columns.')
# if data contains np.inf then return inf or nan
if np.any(np.isinf(u_values)) ^ np.any(np.isinf(v_values)):
return np.inf
elif np.any(np.isinf(u_values)) and np.any(np.isinf(v_values)):
return np.nan
# create constraints
A_upper_part = sparse.block_diag((np.ones((1, n)), ) * m)
A_lower_part = sparse.hstack((sparse.eye(n), ) * m)
# sparse constraint matrix of size (m + n)*(m * n)
A = sparse.vstack((A_upper_part, A_lower_part))
A = sparse.coo_array(A)
# get cost matrix
D = distance_matrix(u_values, v_values, p=2)
cost = D.ravel()
# create the minimization target
p_u = np.full(m, 1/m) if u_weights is None else u_weights/np.sum(u_weights)
p_v = np.full(n, 1/n) if v_weights is None else v_weights/np.sum(v_weights)
b = np.concatenate((p_u, p_v), axis=0)
# solving LP
constraints = LinearConstraint(A=A.T, ub=cost)
opt_res = milp(c=-b, constraints=constraints, bounds=(-np.inf, np.inf))
return -opt_res.fun
@xp_capabilities(np_only=True)
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the Wasserstein-1 distance between two 1D discrete distributions.

    The Wasserstein distance, also called the Earth mover's distance or the
    optimal transport distance, is a similarity metric between two probability
    distributions [1]_. In the discrete case, the Wasserstein distance can be
    understood as the cost of an optimal transport plan to convert one
    distribution into the other. The cost is calculated as the product of the
    amount of probability mass being moved and the distance it is being moved.
    A brief and intuitive introduction can be found at [2]_.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values : 1d array_like
        A sample from a probability distribution or the support (set of all
        possible values) of a probability distribution. Each element is an
        observation or possible value.
    v_values : 1d array_like
        A sample from or the support of a second distribution.
    u_weights, v_weights : 1d array_like, optional
        Weights or counts corresponding with the sample or probability masses
        corresponding with the support values. Sum of elements must be positive
        and finite. If unspecified, each value is assigned the same weight.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    Given two 1D probability mass functions, :math:`u` and :math:`v`, the first
    Wasserstein distance between the distributions is:

    .. math::

        l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
        \mathbb{R}} |x-y| \mathrm{d} \pi (x, y)

    where :math:`\Gamma (u, v)` is the set of (probability) distributions on
    :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
    :math:`v` on the first and second factors respectively. For a given value
    :math:`x`, :math:`u(x)` gives the probability of :math:`u` at position
    :math:`x`, and the same for :math:`v(x)`.

    If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
    :math:`v`, this distance also equals to:

    .. math::

        l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|

    See [3]_ for a proof of the equivalence of both definitions.

    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
    .. [2] Lili Weng, "What is Wasserstein distance?", Lil'log,
           https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-wasserstein-distance.
    .. [3] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
           Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.

    See Also
    --------
    wasserstein_distance_nd: Compute the Wasserstein-1 distance between two N-D
        discrete distributions.

    Examples
    --------
    >>> from scipy.stats import wasserstein_distance
    >>> wasserstein_distance([0, 1, 3], [5, 6, 8])
    5.0
    >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
    0.25
    >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
    ...                      [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
    4.0781331438047861
    """
    # The 1D Wasserstein-1 distance is the p=1 special case of the generic
    # CDF-based distance; input validation happens inside `_cdf_distance`.
    return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
@xp_capabilities(np_only=True)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""Compute the energy distance between two 1D distributions.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value. If unspecified, each value is assigned the same
        weight.
        `u_weights` (resp. `v_weights`) must have the same length as
        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
        must still be positive and finite so that the weights can be normalized
        to sum to 1.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The energy distance between two distributions :math:`u` and :math:`v`, whose
    respective CDFs are :math:`U` and :math:`V`, equals to:

    .. math::

        D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
        \mathbb E|Y - Y'| \right)^{1/2}

    where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
    independent random variables whose probability distribution is :math:`u`
    (resp. :math:`v`).

    Sometimes the square of this quantity is referred to as the "energy
    distance" (e.g. in [2]_, [4]_), but as noted in [1]_ and [3]_, only the
    definition above satisfies the axioms of a distance function (metric).

    As shown in [2]_, for one-dimensional real-valued variables, the energy
    distance is linked to the non-distribution-free version of the Cramér-von
    Mises distance:

    .. math::

        D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
        \right)^{1/2}

    Note that the common Cramér-von Mises criterion uses the distribution-free
    version of the distance. See [2]_ (section 2), for more details about both
    versions of the distance.

    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
           Computational Statistics, 8(1):27-38 (2015).
    .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
           Green State University, Department of Mathematics and Statistics,
           Technical Report 02-16 (2002).
    .. [3] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
    .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
           Munos "The Cramer Distance as a Solution to Biased Wasserstein
           Gradients" (2017). :arXiv:`1705.10743`.

    Examples
    --------
    >>> from scipy.stats import energy_distance
    >>> energy_distance([0], [2])
    2.0000000000000004
    >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
    1.0000000000000002
    >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
    ...                 [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
    0.88003340976158217
    """
    # Per the Cramér-von Mises identity in the Notes, the energy distance is
    # sqrt(2) times the p=2 CDF-based distance.
    return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
                                      u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
    r"""Compute the order-`p` statistical distance between two 1D distributions.

    For distributions :math:`u` and :math:`v` with respective CDFs :math:`U`
    and :math:`V`, the distance is defined as:

    .. math::

        l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}

    ``p = 1`` gives the Wasserstein distance; ``p = 2`` gives the energy
    distance.

    Parameters
    ----------
    p : positive parameter
        Order of the distance.
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value; must have the same length as the corresponding
        values array, with a positive and finite sum (weights are normalized
        to sum to 1). If unspecified, each value is assigned the same weight.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen
    as generalized functions, in which case they are weighted sums of Dirac
    delta functions located at the specified values.

    References
    ----------
    .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
           Munos "The Cramer Distance as a Solution to Biased Wasserstein
           Gradients" (2017). :arXiv:`1705.10743`.
    """
    u_values, u_weights = _validate_distribution(u_values, u_weights)
    v_values, v_weights = _validate_distribution(v_values, v_weights)

    # Pool both samples into a common sorted grid: each empirical CDF is
    # piecewise constant between consecutive grid points, and `deltas` holds
    # the widths of those pieces.
    merged = np.concatenate((u_values, v_values))
    merged.sort(kind='mergesort')
    deltas = np.diff(merged)
    probe = merged[:-1]

    def empirical_cdf(values, weights):
        # Evaluate the (optionally weighted) empirical CDF of one sample at
        # every point of `probe`.
        order = np.argsort(values)
        positions = values[order].searchsorted(probe, 'right')
        if weights is None:
            return positions / values.size
        cumweights = np.concatenate(([0], np.cumsum(weights[order])))
        return cumweights[positions] / cumweights[-1]

    cdf_diff = empirical_cdf(u_values, u_weights) - empirical_cdf(v_values, v_weights)

    # Integrate |U-V|^p against the grid widths. p = 1 and p = 2 are handled
    # without np.power, which would add roughly 15% overhead.
    if p == 1:
        return np_vecdot(np.abs(cdf_diff), deltas)
    if p == 2:
        return np.sqrt(np_vecdot(np.square(cdf_diff), deltas))
    return np.power(np_vecdot(np.power(np.abs(cdf_diff), p), deltas), 1/p)
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
def _sum_of_squares(a, axis=0):
    """Square each element of the input array, and return the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See Also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).
    """
    arr, ax = _chk_asarray(a, axis)
    # The dot product of an array with itself sums the squared elements in
    # a single pass.
    return np_vecdot(arr, arr, axis=ax)
def _square_of_sums(a, axis=0):
    """Sum elements of the input array, and return the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See Also
    --------
    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
    """
    arr, ax = _chk_asarray(a, axis)
    total = np.sum(arr, ax)
    # Promote to float before squaring so integer inputs don't overflow.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
@xp_capabilities(skip_backends=[("cupy", "`repeat` can't handle array second arg"),
                                ("dask.array", "no `take_along_axis`")],
                 jax_jit=False)
def rankdata(a, method='average', *, axis=None, nan_policy='propagate'):
    """Assign ranks to data, dealing with ties appropriately.

    By default (``axis=None``), the data array is first flattened, and a flat
    array of ranks is returned. Separately reshape the rank array to the
    shape of the data array if desired (see Examples).

    Ranks begin at 1. The `method` argument controls how ranks are assigned
    to equal values. See [1]_ for further discussion of ranking methods.

    Parameters
    ----------
    a : array_like
        The array of values to be ranked.
    method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
        The method used to assign ranks to tied elements.
        The following methods are available (default is 'average'):

        * 'average': The average of the ranks that would have been assigned to
          all the tied values is assigned to each value.
        * 'min': The minimum of the ranks that would have been assigned to all
          the tied values is assigned to each value. (This is also
          referred to as "competition" ranking.)
        * 'max': The maximum of the ranks that would have been assigned to all
          the tied values is assigned to each value.
        * 'dense': Like 'min', but the rank of the next highest element is
          assigned the rank immediately after those assigned to the tied
          elements.
        * 'ordinal': All values are given a distinct rank, corresponding to
          the order that the values occur in `a`.
    axis : {None, int}, optional
        Axis along which to perform the ranking. If ``None``, the data array
        is first flattened.
    nan_policy : {'propagate', 'omit', 'raise'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': propagates nans through the rank calculation
        * 'omit': performs the calculations ignoring nan values
        * 'raise': raises an error

        .. note::

            When `nan_policy` is 'propagate', the output is an array of *all*
            nans because ranks relative to nans in the input are undefined.
            When `nan_policy` is 'omit', nans in `a` are ignored when ranking
            the other values, and the corresponding locations of the output
            are nan.

        .. versionadded:: 1.10

    Returns
    -------
    ranks : ndarray
         An array of size equal to the size of `a`, containing rank
         scores.

    References
    ----------
    .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import rankdata
    >>> rankdata([0, 2, 3, 2])
    array([ 1. ,  2.5,  4. ,  2.5])
    >>> rankdata([0, 2, 3, 2], method='min')
    array([ 1,  2,  4,  2])
    >>> rankdata([0, 2, 3, 2], method='max')
    array([ 1,  3,  4,  3])
    >>> rankdata([0, 2, 3, 2], method='dense')
    array([ 1,  2,  3,  2])
    >>> rankdata([0, 2, 3, 2], method='ordinal')
    array([ 1,  2,  4,  3])
    >>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
    array([[1. , 2.5],
          [4. , 2.5]])
    >>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
    array([[1. , 2.5, 2.5],
           [2. , 1. , 3. ]])
    >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="propagate")
    array([nan, nan, nan, nan, nan, nan])
    >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="omit")
    array([ 2.,  3.,  4., nan,  1., nan])
    """
    methods = ('average', 'min', 'max', 'dense', 'ordinal')
    if method not in methods:
        raise ValueError(f'unknown method "{method}"')

    xp = array_namespace(a)
    x = xp.asarray(a)

    if axis is None:
        # Flatten and rank along the (now only) last axis.
        x = xp_ravel(x)
        axis = -1

    if xp_size(x) == 0:
        # Empty input: return an empty array of the appropriate dtype
        # ('average' yields float ranks; the other methods yield integers).
        dtype = xp.asarray(1.).dtype if method == 'average' else xp.asarray(1).dtype
        return xp.empty(x.shape, dtype=dtype)

    contains_nan = _contains_nan(x, nan_policy)

    # `_rankdata` ranks along the last axis, so move `axis` there and back.
    x = xp_swapaxes(x, axis, -1, xp=xp)
    ranks = _rankdata(x, method, xp=xp)

    if contains_nan:
        # 'omit': NaN only at the NaN locations themselves.
        # 'propagate': every rank of a slice containing a NaN becomes NaN.
        default_float = xp_default_dtype(xp)
        i_nan = (xp.isnan(x) if nan_policy == 'omit'
                 else xp.any(xp.isnan(x), axis=-1))
        ranks = xp.asarray(ranks, dtype=default_float)  # copy=False when implemented
        ranks[i_nan] = xp.nan

    ranks = xp_swapaxes(ranks, axis, -1, xp=xp)
    return ranks
def _order_ranks(ranks, j, *, xp):
    """Scatter ascending-order `ranks` back to the positions given by sorter `j`."""
    xp = xp if xp is not None else array_namespace(ranks)
    if not (is_numpy(xp) or is_cupy(xp)):
        # `put_along_axis` is not in the array API (data-apis/array-api#177),
        # so invert the sorter with a stable argsort and gather instead.
        j_inverse = xp.argsort(j, axis=-1, stable=True)
        return xp.take_along_axis(ranks, j_inverse, axis=-1)
    out = xp.empty(j.shape, dtype=ranks.dtype)
    xp.put_along_axis(out, j, ranks, axis=-1)
    return out
def _rankdata(x, method, return_ties=False, xp=None):
    # Rank data `x` along the last axis by the desired `method`;
    # additionally return tie counts if `return_ties` is True.
    xp = array_namespace(x) if xp is None else xp
    shape = x.shape
    # 'average' can produce fractional ranks; all other methods are integral.
    dtype = xp.asarray(1.).dtype if method == 'average' else xp.asarray(1).dtype

    # Get sort order (stable, so tied elements keep their input order)
    j = xp.argsort(x, axis=-1, stable=True)
    ordinal_ranks = xp.broadcast_to(xp.arange(1, shape[-1]+1, dtype=dtype), shape)

    # Ordinal ranks is very easy because ties don't matter. We're done.
    if method == 'ordinal':
        return _order_ranks(ordinal_ranks, j, xp=xp)  # never return ties

    # Sort array
    y = xp.take_along_axis(x, j, axis=-1)

    # Logical indices of unique elements: True at the first element of each
    # run of equal (sorted) values.
    i = xp.concat([xp.ones(shape[:-1] + (1,), dtype=xp.bool),
                   y[..., :-1] != y[..., 1:]], axis=-1)

    # Integer indices of unique elements
    indices = xp.arange(xp_size(y))[xp.reshape(i, (-1,))]  # i gets raveled

    # Counts of unique elements (run lengths of ties)
    counts = xp.diff(indices, append=xp.asarray([xp_size(y)], dtype=indices.dtype))

    # Compute `'min'`, `'max'`, and `'mid'` ranks of unique elements
    if method == 'min':
        ranks = ordinal_ranks[i]
    elif method == 'max':
        ranks = ordinal_ranks[i] + counts - 1
    elif method == 'average':
        # array API doesn't promote integers to floats
        ranks = ordinal_ranks[i] + (xp.asarray(counts, dtype=dtype) - 1)/2
    elif method == 'dense':
        ranks = xp.cumulative_sum(xp.astype(i, dtype, copy=False), axis=-1)[i]

    # Expand one rank per unique value back to one rank per element, then
    # reorder to match the positions of the (unsorted) input.
    ranks = xp.reshape(xp.repeat(ranks, counts), shape)
    ranks = _order_ranks(ranks, j, xp=xp)

    if return_ties:
        # Tie information is returned in a format that is useful to functions that
        # rely on this (private) function. Example:
        # >>> x = np.asarray([3, 2, 1, 2, 2, 2, 1])
        # >>> _, t = _rankdata(x, 'average', return_ties=True)
        # >>> t  # array([2., 0., 4., 0., 0., 0., 1.])  # two 1s, four 2s, and one 3
        # Unlike ranks, tie counts are *not* reordered to correspond with the order of
        # the input; e.g. the number of appearances of the lowest rank element comes
        # first. This is a useful format because:
        # - The shape of the result is the shape of the input. Different slices can
        #   have different numbers of tied elements but not result in a ragged array.
        # - Functions that use `t` usually don't need to know which each element of the
        #   original array is associated with each tie count; they perform a reduction
        #   over the tie counts only. The tie counts are naturally computed in a
        #   sorted order, so this does not unnecessarily reorder them.
        # - One exception is `wilcoxon`, which needs the number of zeros. Zeros always
        #   have the lowest rank, so it is easy to find them at the zeroth index.
        t = xp.zeros(shape, dtype=xp.float64)
        t = xpx.at(t)[i].set(xp.astype(counts, t.dtype, copy=False))
        return ranks, t
    return ranks
@xp_capabilities(np_only=True)
def expectile(a, alpha=0.5, *, weights=None):
    r"""Compute the expectile at the specified level.

    Expectiles are a generalization of the expectation in the same way as
    quantiles are a generalization of the median. The expectile at level
    `alpha = 0.5` is the mean (average). See Notes for more details.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose expectile is desired.
    alpha : float, default: 0.5
        The level of the expectile; e.g., ``alpha=0.5`` gives the mean.
    weights : array_like, optional
        An array of weights associated with the values in `a`.
        The `weights` must be broadcastable to the same shape as `a`.
        Default is None, which gives each value a weight of 1.0.
        An integer valued weight element acts like repeating the corresponding
        observation in `a` that many times. See Notes for more details.

    Returns
    -------
    expectile : ndarray
        The empirical expectile at level `alpha`.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.quantile : Quantile

    Notes
    -----
    In general, the expectile at level :math:`\alpha` of a random variable
    :math:`X` with cumulative distribution function (CDF) :math:`F` is given
    by the unique solution :math:`t` of:

    .. math::

        \alpha E((X - t)_+) = (1 - \alpha) E((t - X)_+) \,.

    Here, :math:`(x)_+ = \max(0, x)` is the positive part of :math:`x`.
    This equation can be equivalently written as:

    .. math::

        \alpha \int_t^\infty (x - t)\mathrm{d}F(x)
        = (1 - \alpha) \int_{-\infty}^t (t - x)\mathrm{d}F(x) \,.

    The empirical expectile at level :math:`\alpha` (`alpha`) of a sample
    :math:`a_i` (the array `a`) is defined by plugging in the empirical CDF of
    `a`. Given sample or case weights :math:`w` (the array `weights`), it
    reads :math:`F_a(x) = \frac{1}{\sum_i w_i} \sum_i w_i 1_{a_i \leq x}`
    with indicator function :math:`1_{A}`. This leads to the definition of the
    empirical expectile at level `alpha` as the unique solution :math:`t` of:

    .. math::

        \alpha \sum_{i=1}^n w_i (a_i - t)_+ =
        (1 - \alpha) \sum_{i=1}^n w_i (t - a_i)_+ \,.

    For :math:`\alpha=0.5`, this simplifies to the weighted average.
    Furthermore, the larger :math:`\alpha`, the larger the value of the
    expectile.

    As a final remark, the expectile at level :math:`\alpha` can also be
    written as a minimization problem. One often used choice is

    .. math::

        \operatorname{argmin}_t
        E(\lvert 1_{t\geq X} - \alpha\rvert(t - X)^2) \,.

    References
    ----------
    .. [1] W. K. Newey and J. L. Powell (1987), "Asymmetric Least Squares
           Estimation and Testing," Econometrica, 55, 819-847.
    .. [2] T. Gneiting (2009). "Making and Evaluating Point Forecasts,"
           Journal of the American Statistical Association, 106, 746 - 762.
           :doi:`10.48550/arXiv.0912.0902`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import expectile
    >>> a = [1, 4, 2, -1]
    >>> expectile(a, alpha=0.5) == np.mean(a)
    True
    >>> expectile(a, alpha=0.2)
    0.42857142857142855
    >>> expectile(a, alpha=0.8)
    2.5714285714285716
    >>> weights = [1, 3, 1, 1]
    >>> expectile(a, alpha=0.8, weights=weights)
    3.3333333333333335
    """
    if alpha < 0 or alpha > 1:
        raise ValueError(
            "The expectile level alpha must be in the range [0, 1]."
        )
    a = np.asarray(a)
    if weights is not None:
        weights = np.broadcast_to(weights, a.shape)

    # This is the empirical equivalent of Eq. (13) with identification
    # function from Table 9 (omitting a factor of 2) in [2] (their y is our
    # data a, their x is our t)
    def first_order(t):
        return np.average(np.abs((a <= t) - alpha) * (t - a), weights=weights)

    # Bracket the root: the expectile lies between the weighted mean and the
    # data maximum for alpha >= 0.5, and between the minimum and the mean
    # otherwise.
    if alpha >= 0.5:
        x0 = np.average(a, weights=weights)
        x1 = np.amax(a)
    else:
        x1 = np.average(a, weights=weights)
        x0 = np.amin(a)

    if x0 == x1:
        # a has a single unique element
        return x0

    # Note that the expectile is the unique solution, so no worries about
    # finding a wrong root.
    res = root_scalar(first_order, x0=x0, x1=x1)
    return res.root
def _lmoment_iv(sample, order, axis, sorted, standardize, xp):
    """Validate and standardize the arguments of `lmoment`.

    Returns the validated ``(sample, order, axis, sorted, standardize)``
    tuple with `sample` moved so the reduction axis is last and sorted
    ascending (unless `sorted` indicates it already is).
    """
    sample = xp_promote(sample, force_floating=True, xp=xp)
    if not xp.isdtype(sample.dtype, "real floating"):
        raise ValueError("`sample` must be an array of real numbers.")

    order = xp.arange(1, 5) if order is None else xp.asarray(order)
    if (not xp.isdtype(order.dtype, "integral") or xp.any(order <= 0)
            or order.size == 0 or order.ndim > 1):
        raise ValueError("`order` must be a scalar or a non-empty array "
                         "of positive integers.")

    # Non-array arguments can be validated with NumPy regardless of backend.
    axis = np.asarray(axis)[()]
    if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0:
        raise ValueError("`axis` must be an integer.")
    axis = int(axis)

    def as_bool(value, name):
        # A flag must be a scalar of boolean dtype.
        value = np.asarray(value)[()]
        if not np.issubdtype(value.dtype, np.bool_) or value.ndim != 0:
            raise ValueError(f"`{name}` must be True or False.")
        return bool(value)

    sorted = as_bool(sorted, "sorted")
    standardize = as_bool(standardize, "standardize")

    sample = xp.moveaxis(sample, axis, -1)
    if not sorted:
        sample = xp.sort(sample, axis=-1)
    return sample, order, axis, sorted, standardize
def _br(x, *, r=0, xp):
    # Helper for `lmoment`: compute the b_r sample statistics (binomially
    # weighted means of order statistics) for every order in the 1-D array
    # `r`. Assumes `x` is sorted ascending along the last axis (the caller
    # sorts it in `_lmoment_iv`) — verify against caller if reused.
    n = x.shape[-1]
    # Add an orders axis and broadcast so each of the r.shape[0] rows holds
    # a copy of the sorted sample.
    x = xp.expand_dims(x, axis=-2)
    x = xp.broadcast_to(x, x.shape[:-2] + (r.shape[0], n))
    # `triu` zeroes entries below the main diagonal, so row i keeps only
    # sample elements with index >= i.
    x = xp.triu(x)
    j = xp.arange(n, dtype=x.dtype)
    n = xp.asarray(n, dtype=x.dtype)[()]
    # Weight element j of row r by binom(j, r), then normalize by
    # binom(n-1, r) * n.
    return (xp.vecdot(special.binom(j, r[:, xp.newaxis]), x, axis=-1)
            / special.binom(n-1, r) / n)
def _prk(r, k):
# Writen to match [1] Equation 27 closely to facilitate review.
# This does not protect against overflow, so improvements to
# robustness would be a welcome follow-up.
return (-1)**(r-k)*special.binom(r, k)*special.binom(r+k, k)
@xp_capabilities(skip_backends=[('dask.array', "too many issues")],
                 jax_jit=False, cpu_only=True,  # torch doesn't have `binom`
                 exceptions=('cupy', 'jax.numpy'))
@_axis_nan_policy_factory(  # noqa: E302
    _moment_result_object, n_samples=1, result_to_tuple=_moment_tuple,
    n_outputs=lambda kwds: _moment_outputs(kwds, [1, 2, 3, 4])
)
def lmoment(sample, order=None, *, axis=0, sorted=False, standardize=True):
    r"""Compute L-moments of a sample from a continuous distribution

    The L-moments of a probability distribution are summary statistics with
    uses similar to those of conventional moments, but they are defined in
    terms of the expected values of order statistics.

    Sample L-moments are defined analogously to population L-moments, and
    they can serve as estimators of population L-moments. They tend to be less
    sensitive to extreme observations than conventional moments.

    Parameters
    ----------
    sample : array_like
        The real-valued sample whose L-moments are desired.
    order : array_like, optional
        The (positive integer) orders of the desired L-moments.
        Must be a scalar or non-empty 1D array. Default is [1, 2, 3, 4].
    axis : int or None, default=0
        If an int, the axis of the input along which to compute the statistic.
        The statistic of each axis-slice (e.g. row) of the input will appear
        in a corresponding element of the output. If None, the input will be
        raveled before computing the statistic.
    sorted : bool, default=False
        Whether `sample` is already sorted in increasing order along `axis`.
        If False (default), `sample` will be sorted.
    standardize : bool, default=True
        Whether to return L-moment ratios for orders 3 and higher.
        L-moment ratios are analogous to standardized conventional
        moments: they are the non-standardized L-moments divided
        by the L-moment of order 2.

    Returns
    -------
    lmoments : ndarray
        The sample L-moments of order `order`.

    See Also
    --------
    moment

    References
    ----------
    .. [1] D. Bilkova. "L-Moments and TL-Moments as an Alternative Tool of
           Statistical Data Analysis". Journal of Applied Mathematics and
           Physics. 2014. :doi:`10.4236/jamp.2014.210104`
    .. [2] J. R. M. Hosking. "L-Moments: Analysis and Estimation of Distributions
           Using Linear Combinations of Order Statistics". Journal of the Royal
           Statistical Society. 1990. :doi:`10.1111/j.2517-6161.1990.tb01775.x`
    .. [3] "L-moment". *Wikipedia*. https://en.wikipedia.org/wiki/L-moment.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng(328458568356392)
    >>> sample = rng.exponential(size=100000)
    >>> stats.lmoment(sample)
    array([1.00124272, 0.50111437, 0.3340092 , 0.16755338])

    Note that the first four standardized population L-moments of the standard
    exponential distribution are 1, 1/2, 1/3, and 1/6; the sample L-moments
    provide reasonable estimates.
    """
    xp = array_namespace(sample)
    args = _lmoment_iv(sample, order, axis, sorted, standardize, xp=xp)
    sample, order, axis, sorted, standardize = args

    # L-moments are linear combinations of the b_k statistics with
    # coefficients p*_{r,k}; compute everything up to the highest order.
    n_moments = int(xp.max(order))
    k = xp.arange(n_moments, dtype=sample.dtype)
    prk = _prk(xpx.expand_dims(k, axis=tuple(range(1, sample.ndim+1))), k)
    bk = _br(sample, r=k, xp=xp)

    n = sample.shape[-1]
    if n < bk.shape[-1]:
        bk = xpx.at(bk)[..., n:].set(0)  # remove NaNs due to n_moments > n
    lmoms = xp.vecdot(prk, bk, axis=-1)

    if standardize and n_moments > 2:
        # Orders 3+ become ratios relative to the order-2 L-moment.
        lmoms = xpx.at(lmoms)[2:, ...].divide(lmoms[1, ...])

    if n < lmoms.shape[0]:
        lmoms = xpx.at(lmoms)[n:, ...].set(xp.nan)  # add NaNs where appropriate

    # return lmoms[order-1]  # strict can't handle fancy indexing plus ellipses
    return xp.take(lmoms, order - 1, axis=0) if order.ndim == 1 else lmoms[order - 1]
# Result type for `linregress`: behaves like a 5-field namedtuple
# (slope, intercept, rvalue, pvalue, stderr) for backward compatibility;
# `intercept_stderr` is available as an extra attribute only.
LinregressResult = _make_tuple_bunch('LinregressResult',
                                     ['slope', 'intercept', 'rvalue',
                                      'pvalue', 'stderr'],
                                     extra_field_names=['intercept_stderr'])
def _pack_LinregressResult(slope, intercept, rvalue, pvalue, stderr, intercept_stderr):
    """Assemble a `LinregressResult` from the individual regression outputs."""
    return LinregressResult(slope=slope, intercept=intercept, rvalue=rvalue,
                            pvalue=pvalue, stderr=stderr,
                            intercept_stderr=intercept_stderr)
def _unpack_LinregressResult(res, _):
return tuple(res) + (res.intercept_stderr,)
@xp_capabilities(np_only=True)
@_axis_nan_policy_factory(_pack_LinregressResult, n_samples=2,
                          result_to_tuple=_unpack_LinregressResult, paired=True,
                          too_small=1, n_outputs=6)
def linregress(x, y, alternative='two-sided'):
    """
    Calculate a linear least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same length N.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the slope of the regression line is nonzero
        * 'less': the slope of the regression line is less than zero
        * 'greater':  the slope of the regression line is greater than zero

        .. versionadded:: 1.7.0

    Returns
    -------
    result : ``LinregressResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Slope of the regression line.
        intercept : float
            Intercept of the regression line.
        rvalue : float
            The Pearson correlation coefficient. The square of ``rvalue``
            is equal to the coefficient of determination.
        pvalue : float
            The p-value for a hypothesis test whose null hypothesis is
            that the slope is zero, using Wald Test with t-distribution of
            the test statistic. See `alternative` above for alternative
            hypotheses.
        stderr : float
            Standard error of the estimated slope (gradient), under the
            assumption of residual normality.
        intercept_stderr : float
            Standard error of the estimated intercept, under the assumption
            of residual normality.

    See Also
    --------
    scipy.optimize.curve_fit :
        Use non-linear least squares to fit a function to data.
    scipy.optimize.leastsq :
        Minimize the sum of squares of a set of equations.

    Notes
    -----
    For compatibility with older versions of SciPy, the return value acts
    like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
    ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::

        slope, intercept, r, p, se = linregress(x, y)

    With that style, however, the standard error of the intercept is not
    available. To have access to all the computed values, including the
    standard error of the intercept, use the return value as an object
    with attributes, e.g.::

        result = linregress(x, y)
        print(result.intercept, result.intercept_stderr)

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats
    >>> rng = np.random.default_rng()

    Generate some data:

    >>> x = rng.random(10)
    >>> y = 1.6*x + rng.random(10)

    Perform the linear regression:

    >>> res = stats.linregress(x, y)

    Coefficient of determination (R-squared):

    >>> print(f"R-squared: {res.rvalue**2:.6f}")
    R-squared: 0.717533

    Plot the data along with the fitted line:

    >>> plt.plot(x, y, 'o', label='original data')
    >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
    >>> plt.legend()
    >>> plt.show()

    Calculate 95% confidence interval on slope and intercept:

    >>> # Two-sided inverse Students t-distribution
    >>> # p - probability, df - degrees of freedom
    >>> from scipy.stats import t
    >>> tinv = lambda p, df: abs(t.ppf(p/2, df))
    >>> ts = tinv(0.05, len(x)-2)
    >>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
    slope (95%): 1.453392 +/- 0.743465
    >>> print(f"intercept (95%): {res.intercept:.6f}"
    ...       f" +/- {ts*res.intercept_stderr:.6f}")
    intercept (95%): 0.616950 +/- 0.544475
    """
    # Guards the r-statistic denominator against division by exactly zero.
    TINY = 1.0e-20
    x = np.asarray(x)
    y = np.asarray(y)

    if x.size == 0 or y.size == 0:
        raise ValueError("Inputs must not be empty.")

    if np.amax(x) == np.amin(x) and len(x) > 1:
        raise ValueError("Cannot calculate a linear regression "
                         "if all x values are identical")

    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)

    # Average sums of square differences from the mean
    #   ssxm = mean( (x-mean(x))^2 )
    #   ssxym = mean( (x-mean(x)) * (y-mean(y)) )
    ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat

    # R-value
    #   r = ssxym / sqrt( ssxm * ssym )
    if ssxm == 0.0 or ssym == 0.0:
        # If the denominator was going to be 0
        r = np.asarray(np.nan if ssxym == 0 else 0.0)[()]
    else:
        r = ssxym / np.sqrt(ssxm * ssym)
        # Test for numerical error propagation (make sure -1 < r < 1)
        if r > 1.0:
            r = 1.0
        elif r < -1.0:
            r = -1.0

    slope = ssxym / ssxm
    intercept = ymean - slope*xmean
    if n == 2:
        # handle case when only two points are passed in
        if y[0] == y[1]:
            prob = 1.0
        else:
            prob = 0.0
        slope_stderr = 0.0
        intercept_stderr = 0.0
    else:
        df = n - 2  # Number of degrees of freedom
        # n-2 degrees of freedom because 2 has been used up
        # to estimate the mean and standard deviation
        t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
        dist = _SimpleStudentT(df)
        prob = _get_pvalue(t, dist, alternative, xp=np)
        prob = prob[()] if prob.ndim == 0 else prob

        slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)

        # Also calculate the standard error of the intercept
        # The following relationship is used:
        #   ssxm = mean( (x-mean(x))^2 )
        #        = ssx - sx*sx
        #        = mean( x^2 ) - mean(x)^2
        intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)

    return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
                            pvalue=prob, stderr=slope_stderr,
                            intercept_stderr=intercept_stderr)
def _xp_mean(x, /, *, axis=None, weights=None, keepdims=False, nan_policy='propagate',
dtype=None, xp=None):
r"""Compute the arithmetic mean along the specified axis.
Parameters
----------
x : real array
Array containing real numbers whose mean is desired.
axis : int or tuple of ints, default: None
If an int or tuple of ints, the axis or axes of the input along which
to compute the statistic. The statistic of each axis-slice (e.g. row)
of the input will appear in a corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic.
weights : real array, optional
If specified, an array of weights associated with the values in `x`;
otherwise ``1``. If `weights` and `x` do not have the same shape, the
arrays will be broadcasted before performing the calculation. See
Notes for details.
keepdims : boolean, optional
If this is set to ``True``, the axes which are reduced are left
in the result as dimensions with length one. With this option,
the result will broadcast correctly against the input array.
nan_policy : {'propagate', 'omit', 'raise'}, default: 'propagate'
Defines how to handle input NaNs.
- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
which the statistic is computed, the corresponding entry of the output
will be NaN.
- ``omit``: NaNs will be omitted when performing the calculation.
If insufficient data remains in the axis slice along which the
statistic is computed, the corresponding entry of the output will be
NaN.
- ``raise``: if a NaN is present, a ``ValueError`` will be raised.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default is
the default float type of the array library; for floating point inputs,
the dtype is that of the input.
Returns
-------
out : array
The mean of each slice
Notes
-----
Let :math:`x_i` represent element :math:`i` of data `x` and let :math:`w_i`
represent the corresponding element of `weights` after broadcasting. Then the
(weighted) mean :math:`\bar{x}_w` is given by:
.. math::
\bar{x}_w = \frac{ \sum_{i=0}^{n-1} w_i x_i }
{ \sum_{i=0}^{n-1} w_i }
where :math:`n` is the number of elements along a slice. Note that this simplifies
to the familiar :math:`(\sum_i x_i) / n` when the weights are all ``1`` (default).
The behavior of this function with respect to weights is somewhat different
from that of `np.average`. For instance,
`np.average` raises an error when `axis` is not specified and the shapes of `x`
and the `weights` array are not the same; `xp_mean` simply broadcasts the two.
Also, `np.average` raises an error when weights sum to zero along a slice;
`xp_mean` computes the appropriate result. The intent is for this function's
interface to be consistent with the rest of `scipy.stats`.
Note that according to the formula, including NaNs with zero weights is not
the same as *omitting* NaNs with ``nan_policy='omit'``; in the former case,
the NaNs will continue to propagate through the calculation whereas in the
latter case, the NaNs are excluded entirely.
"""
# ensure that `x` and `weights` are array-API compatible arrays of identical shape
xp = array_namespace(x) if xp is None else xp
x = _asarray(x, dtype=dtype, subok=True)
weights = xp.asarray(weights, dtype=dtype) if weights is not None else weights
# to ensure that this matches the behavior of decorated functions when one of the
# arguments has size zero, it's easiest to call a similar decorated function.
if is_numpy(xp) and (xp_size(x) == 0
or (weights is not None and xp_size(weights) == 0)):
return gmean(x, weights=weights, axis=axis, keepdims=keepdims)
x, weights = xp_promote(x, weights, broadcast=True, force_floating=True, xp=xp)
if weights is not None:
x, weights = _share_masks(x, weights, xp=xp)
# handle the special case of zero-sized arrays
message = (too_small_1d_not_omit if (x.ndim == 1 or axis is None)
else too_small_nd_not_omit)
if xp_size(x) == 0:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = xp.mean(x, axis=axis, keepdims=keepdims)
if xp_size(res) != 0:
warnings.warn(message, SmallSampleWarning, stacklevel=2)
return res
contains_nan = _contains_nan(x, nan_policy, xp_omit_okay=True, xp=xp)
if weights is not None:
contains_nan_w = _contains_nan(weights, nan_policy, xp_omit_okay=True, xp=xp)
contains_nan = contains_nan | contains_nan_w
# Handle `nan_policy='omit'` by giving zero weight to NaNs, whether they
# appear in `x` or `weights`. Emit warning if there is an all-NaN slice.
# Test nan_policy before the implicit call to bool(contains_nan)
# to avoid raising on lazy xps on the default nan_policy='propagate'
lazy = is_lazy_array(x)
if nan_policy == 'omit' and (lazy or contains_nan):
nan_mask = xp.isnan(x)
if weights is not None:
nan_mask |= xp.isnan(weights)
if not lazy and xp.any(xp.all(nan_mask, axis=axis)):
message = (too_small_1d_omit if (x.ndim == 1 or axis is None)
else too_small_nd_omit)
warnings.warn(message, SmallSampleWarning, stacklevel=2)
weights = xp.ones_like(x) if weights is None else weights
x = xp.where(nan_mask, 0., x)
weights = xp.where(nan_mask, 0., weights)
# Perform the mean calculation itself
if weights is None:
return xp.mean(x, axis=axis, keepdims=keepdims)
# consider using `vecdot` if `axis` tuple support is added (data-apis/array-api#910)
norm = xp.sum(weights, axis=axis)
wsum = xp.sum(x * weights, axis=axis)
with np.errstate(divide='ignore', invalid='ignore'):
res = wsum/norm
# Respect `keepdims` and convert NumPy 0-D arrays to scalars
if keepdims:
if axis is None:
final_shape = (1,) * len(x.shape)
else:
# axis can be a scalar or sequence
axes = (axis,) if not isinstance(axis, Sequence) else axis
final_shape = list(x.shape)
for i in axes:
final_shape[i] = 1
res = xp.reshape(res, tuple(final_shape))
return res[()] if res.ndim == 0 else res
def _xp_var(x, /, *, axis=None, correction=0, keepdims=False, nan_policy='propagate',
dtype=None, xp=None):
# an array-api compatible function for variance with scipy.stats interface
# and features (e.g. `nan_policy`).
xp = array_namespace(x) if xp is None else xp
x = _asarray(x, subok=True)
# use `_xp_mean` instead of `xp.var` for desired warning behavior
# it would be nice to combine this with `_var`, which uses `_moment`
# and therefore warns when precision is lost, but that does not support
# `axis` tuples or keepdims. Eventually, `_axis_nan_policy` will simplify
# `axis` tuples and implement `keepdims` for non-NumPy arrays; then it will
# be easy.
kwargs = dict(axis=axis, nan_policy=nan_policy, dtype=dtype, xp=xp)
mean = _xp_mean(x, keepdims=True, **kwargs)
x = _asarray(x, dtype=mean.dtype, subok=True)
x_mean = _demean(x, mean, axis, xp=xp)
x_mean_conj = (xp.conj(x_mean) if xp.isdtype(x_mean.dtype, 'complex floating')
else x_mean) # crossref data-apis/array-api#824
var = _xp_mean(x_mean * x_mean_conj, keepdims=keepdims, **kwargs)
if correction != 0:
n = _length_nonmasked(x, axis, xp=xp)
# Or two lines with ternaries : )
# axis = range(x.ndim) if axis is None else axis
# n = math.prod(x.shape[i] for i in axis) if iterable(axis) else x.shape[axis]
n = xp.asarray(n, dtype=var.dtype, device=xp_device(x))
if nan_policy == 'omit':
nan_mask = xp.astype(xp.isnan(x), var.dtype)
n = n - xp.sum(nan_mask, axis=axis, keepdims=keepdims)
# Produce NaNs silently when n - correction <= 0
nc = n - correction
factor = xpx.apply_where(nc > 0, (n, nc), operator.truediv, fill_value=xp.nan)
var *= factor
return var[()] if var.ndim == 0 else var
| QuantileTestResult |
python | getsentry__sentry | src/sentry/testutils/helpers/task_runner.py | {
"start": 757,
"end": 3115
} | class ____:
def __init__(self) -> None:
self._active = False
self._orig_signal_send = TaskworkerTask._signal_send
self.queue: list[tuple[TaskworkerTask[Any, Any], tuple[Any, ...], dict[str, Any]]] = []
def _signal_send(
self,
task: TaskworkerTask[Any, Any],
args: tuple[Any, ...] = (),
kwargs: dict[str, Any] | None = None,
) -> None:
if not self._active:
raise AssertionError("task enqueued to burst runner while burst was not active!")
self.queue.append((task, args, {} if kwargs is None else kwargs))
@contextlib.contextmanager
def _patched(self) -> Generator[Self]:
if self._active:
raise AssertionError("nested BurstTaskRunner!")
with mock.patch.object(TaskworkerTask, "_signal_send", self._signal_send):
self._active = True
try:
yield self
finally:
self._active = False
@contextlib.contextmanager
def temporarily_enable_normal_task_processing(self) -> Generator[None]:
if not self._active:
raise AssertionError("cannot disable burst when not active")
with mock.patch.object(TaskworkerTask, "_signal_send", self._orig_signal_send):
self._active = False
try:
yield
finally:
self._active = True
def __call__(self, max_jobs: int | None = None) -> None:
if not self._active:
raise AssertionError("burst called outside of mocked context")
jobs = 0
while self.queue and (max_jobs is None or max_jobs > jobs):
task, args, kwargs = self.queue.pop(0)
try:
task(*args, **kwargs)
except BurstTaskRunnerRetryError:
self.queue.append((task, args, kwargs))
jobs += 1
if self.queue:
raise RuntimeError(f"Could not empty queue, last task items: {self.queue!r}")
def BurstTaskRunner() -> ContextManager[_BurstState]:
"""
A fixture for queueing up tasks and working them off in bursts.
The main interesting property is that one can run tasks at a later point in
the future, testing "concurrency" without actually spawning any kind of
worker.
"""
return _BurstState()._patched()
| _BurstState |
python | milvus-io__pymilvus | tests/test_bulk_writer_validators.py | {
"start": 2484,
"end": 4731
} | class ____:
def test_valid_list(self):
"""Test valid list of binary values"""
result = binary_vector_validator([1, 0, 1, 1, 0, 0, 1, 0], 8)
expected = np.packbits([1, 0, 1, 1, 0, 0, 1, 0], axis=-1).tolist()
assert result == expected
def test_invalid_list_length(self):
"""Test list with wrong dimension"""
with pytest.raises(MilvusException, match="length of the list must be equal to vector dimension"):
binary_vector_validator([1, 0, 1], 8)
def test_valid_bytes(self):
"""Test valid bytes input"""
data = b'\x00\x01'
result = binary_vector_validator(data, 16)
assert result == [0, 1]
def test_invalid_bytes_length(self):
"""Test bytes with wrong length"""
data = b'\x00'
with pytest.raises(MilvusException, match="length of the bytes must be equal to 8x of vector dimension"):
binary_vector_validator(data, 16)
def test_valid_numpy_uint8(self):
"""Test valid numpy array with uint8"""
arr = np.array([0, 1, 2], dtype=np.uint8)
result = binary_vector_validator(arr, 24)
assert result == [0, 1, 2]
def test_invalid_numpy_dtype(self):
"""Test numpy array with invalid dtype"""
arr = np.array([0, 1, 2], dtype=np.int32)
with pytest.raises(MilvusException, match='dtype must be "uint8"'):
binary_vector_validator(arr, 24)
def test_invalid_numpy_shape(self):
"""Test numpy array with wrong shape"""
arr = np.array([[0, 1], [2, 3]], dtype=np.uint8)
with pytest.raises(MilvusException, match="shape must be one dimension"):
binary_vector_validator(arr, 32)
def test_invalid_numpy_length(self):
"""Test numpy array with wrong dimension"""
arr = np.array([0, 1], dtype=np.uint8)
with pytest.raises(MilvusException, match="length must be equal to 8x of vector dimension"):
binary_vector_validator(arr, 24)
def test_invalid_type(self):
"""Test with invalid input type"""
with pytest.raises(MilvusException, match="only accept numpy.ndarray, list, bytes"):
binary_vector_validator("invalid", 8)
| TestBinaryVectorValidator |
python | ipython__ipython | IPython/external/qt_loaders.py | {
"start": 1171,
"end": 11863
} | class ____(importlib.abc.MetaPathFinder):
"""Import Hook that will guard against bad Qt imports
once IPython commits to a specific binding
"""
def __init__(self):
self.__forbidden = set()
def forbid(self, module_name):
sys.modules.pop(module_name, None)
self.__forbidden.add(module_name)
def find_spec(self, fullname, path, target=None):
if path:
return
if fullname in self.__forbidden:
raise ImportError(
"""
Importing %s disabled by IPython, which has
already imported an Incompatible QT Binding: %s
"""
% (fullname, loaded_api())
)
ID = ImportDenier()
sys.meta_path.insert(0, ID)
def commit_api(api):
"""Commit to a particular API, and trigger ImportErrors on subsequent
dangerous imports"""
modules = set(api_to_module.values())
modules.remove(api_to_module[api])
for mod in modules:
ID.forbid(mod)
def loaded_api():
"""Return which API is loaded, if any
If this returns anything besides None,
importing any other Qt binding is unsafe.
Returns
-------
None, 'pyside6', 'pyqt6', 'pyside2', 'pyside', 'pyqt', 'pyqt5', 'pyqtv1'
"""
if sys.modules.get("PyQt6.QtCore"):
return QT_API_PYQT6
elif sys.modules.get("PySide6.QtCore"):
return QT_API_PYSIDE6
elif sys.modules.get("PyQt5.QtCore"):
return QT_API_PYQT5
elif sys.modules.get("PySide2.QtCore"):
return QT_API_PYSIDE2
elif sys.modules.get("PyQt4.QtCore"):
if qtapi_version() == 2:
return QT_API_PYQT
else:
return QT_API_PYQTv1
elif sys.modules.get("PySide.QtCore"):
return QT_API_PYSIDE
return None
def has_binding(api):
"""Safely check for PyQt4/5, PySide or PySide2, without importing submodules
Parameters
----------
api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyside' | 'pyside2' | 'pyqtdefault']
Which module to check for
Returns
-------
True if the relevant module appears to be importable
"""
module_name = api_to_module[api]
from importlib.util import find_spec
required = ['QtCore', 'QtGui', 'QtSvg']
if api in (QT_API_PYQT5, QT_API_PYSIDE2, QT_API_PYQT6, QT_API_PYSIDE6):
# QT5 requires QtWidgets too
required.append('QtWidgets')
for submod in required:
try:
spec = find_spec('%s.%s' % (module_name, submod))
except ImportError:
# Package (e.g. PyQt5) not found
return False
else:
if spec is None:
# Submodule (e.g. PyQt5.QtCore) not found
return False
if api == QT_API_PYSIDE:
# We can also safely check PySide version
import PySide
return PySide.__version_info__ >= (1, 0, 3)
return True
def qtapi_version():
"""Return which QString API has been set, if any
Returns
-------
The QString API version (1 or 2), or None if not set
"""
try:
import sip
except ImportError:
# as of PyQt5 5.11, sip is no longer available as a top-level
# module and needs to be imported from the PyQt5 namespace
try:
from PyQt5 import sip
except ImportError:
return
try:
return sip.getapi('QString')
except ValueError:
return
def can_import(api):
"""Safely query whether an API is importable, without importing it"""
if not has_binding(api):
return False
current = loaded_api()
if api == QT_API_PYQT_DEFAULT:
return current in [QT_API_PYQT6, None]
else:
return current in [api, None]
def import_pyqt4(version=2):
"""
Import PyQt4
Parameters
----------
version : 1, 2, or None
Which QString/QVariant API to use. Set to None to use the system
default
ImportErrors raised within this function are non-recoverable
"""
# The new-style string API (version=2) automatically
# converts QStrings to Unicode Python strings. Also, automatically unpacks
# QVariants to their underlying objects.
import sip
if version is not None:
sip.setapi('QString', version)
sip.setapi('QVariant', version)
from PyQt4 import QtGui, QtCore, QtSvg
if QtCore.PYQT_VERSION < 0x040700:
raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
QtCore.PYQT_VERSION_STR)
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
# query for the API version (in case version == None)
version = sip.getapi('QString')
api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
return QtCore, QtGui, QtSvg, api
def import_pyqt5():
"""
Import PyQt5
ImportErrors raised within this function are non-recoverable
"""
from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
# Join QtGui and QtWidgets for Qt4 compatibility.
QtGuiCompat = types.ModuleType('QtGuiCompat')
QtGuiCompat.__dict__.update(QtGui.__dict__)
QtGuiCompat.__dict__.update(QtWidgets.__dict__)
api = QT_API_PYQT5
return QtCore, QtGuiCompat, QtSvg, api
def import_pyqt6():
"""
Import PyQt6
ImportErrors raised within this function are non-recoverable
"""
from PyQt6 import QtCore, QtSvg, QtWidgets, QtGui
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
# Join QtGui and QtWidgets for Qt4 compatibility.
QtGuiCompat = types.ModuleType("QtGuiCompat")
QtGuiCompat.__dict__.update(QtGui.__dict__)
QtGuiCompat.__dict__.update(QtWidgets.__dict__)
api = QT_API_PYQT6
return QtCore, QtGuiCompat, QtSvg, api
def import_pyside():
"""
Import PySide
ImportErrors raised within this function are non-recoverable
"""
from PySide import QtGui, QtCore, QtSvg
return QtCore, QtGui, QtSvg, QT_API_PYSIDE
def import_pyside2():
"""
Import PySide2
ImportErrors raised within this function are non-recoverable
"""
from PySide2 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
# Join QtGui and QtWidgets for Qt4 compatibility.
QtGuiCompat = types.ModuleType('QtGuiCompat')
QtGuiCompat.__dict__.update(QtGui.__dict__)
QtGuiCompat.__dict__.update(QtWidgets.__dict__)
QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE2
def import_pyside6():
"""
Import PySide6
ImportErrors raised within this function are non-recoverable
"""
def get_attrs(module):
return {
name: getattr(module, name)
for name in dir(module)
if not name.startswith("_")
}
from PySide6 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
# Join QtGui and QtWidgets for Qt4 compatibility.
QtGuiCompat = types.ModuleType("QtGuiCompat")
QtGuiCompat.__dict__.update(QtGui.__dict__)
if QtCore.__version_info__ < (6, 7):
QtGuiCompat.__dict__.update(QtWidgets.__dict__)
QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
else:
QtGuiCompat.__dict__.update(get_attrs(QtWidgets))
QtGuiCompat.__dict__.update(get_attrs(QtPrintSupport))
return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE6
def load_qt(api_options):
"""
Attempt to import Qt, given a preference list
of permissible bindings
It is safe to call this function multiple times.
Parameters
----------
api_options : List of strings
The order of APIs to try. Valid items are 'pyside', 'pyside2',
'pyqt', 'pyqt5', 'pyqtv1' and 'pyqtdefault'
Returns
-------
A tuple of QtCore, QtGui, QtSvg, QT_API
The first three are the Qt modules. The last is the
string indicating which module was loaded.
Raises
------
ImportError, if it isn't possible to import any requested
bindings (either because they aren't installed, or because
an incompatible library has already been installed)
"""
loaders = {
# Qt6
QT_API_PYQT6: import_pyqt6,
QT_API_PYSIDE6: import_pyside6,
# Qt5
QT_API_PYQT5: import_pyqt5,
QT_API_PYSIDE2: import_pyside2,
# Qt4
QT_API_PYSIDE: import_pyside,
QT_API_PYQT: import_pyqt4,
QT_API_PYQTv1: partial(import_pyqt4, version=1),
# default
QT_API_PYQT_DEFAULT: import_pyqt6,
}
for api in api_options:
if api not in loaders:
raise RuntimeError(
"Invalid Qt API %r, valid values are: %s" %
(api, ", ".join(["%r" % k for k in loaders.keys()])))
if not can_import(api):
continue
#cannot safely recover from an ImportError during this
result = loaders[api]()
api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
commit_api(api)
return result
else:
# Clear the environment variable since it doesn't work.
if "QT_API" in os.environ:
del os.environ["QT_API"]
raise ImportError(
"""
Could not load requested Qt binding. Please ensure that
PyQt4 >= 4.7, PyQt5, PyQt6, PySide >= 1.0.3, PySide2, or
PySide6 is available, and only one is imported per session.
Currently-imported Qt library: %r
PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
PyQt6 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
PySide2 installed: %s
PySide6 installed: %s
Tried to load: %r
"""
% (
loaded_api(),
has_binding(QT_API_PYQT5),
has_binding(QT_API_PYQT6),
has_binding(QT_API_PYSIDE2),
has_binding(QT_API_PYSIDE6),
api_options,
)
)
def enum_factory(QT_API, QtCore):
"""Construct an enum helper to account for PyQt5 <-> PyQt6 changes."""
@lru_cache(None)
def _enum(name):
# foo.bar.Enum.Entry (PyQt6) <=> foo.bar.Entry (non-PyQt6).
return operator.attrgetter(
name if QT_API == QT_API_PYQT6 else name.rpartition(".")[0]
)(sys.modules[QtCore.__package__])
return _enum
| ImportDenier |
python | bokeh__bokeh | tests/unit/bokeh/core/test_enums.py | {
"start": 3127,
"end": 4547
} | class ____:
def test_basic(self) -> None:
e = bce.enumeration("foo", "bar", "baz")
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "bar", "baz"]:
assert x in e
assert "junk" not in e
def test_case(self) -> None:
e = bce.enumeration("foo", "bar", "baz", case_sensitive=False)
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "FOO", "bar", "bAr", "baz", "BAZ"]:
assert x in e
assert "junk" not in e
def test_quote(self) -> None:
e = bce.enumeration("foo", "bar", "baz", quote=True)
assert isinstance(e, bce.Enumeration)
assert str(e) == 'Enumeration("foo", "bar", "baz")' or str(e) == "Enumeration('foo', 'bar', 'baz')"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "bar", "baz"]:
assert x in e
assert "junk" not in e
def test_default(self) -> None:
# this is private but used by properties
e = bce.enumeration("foo", "bar", "baz")
assert e._default == "foo"
def test_len(self) -> None:
e = bce.enumeration("foo", "bar", "baz")
assert len(e) == 3
| Test_enumeration |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 14680,
"end": 19545
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: MoshiConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[MoshiConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
| MoshiRotaryEmbedding |
python | encode__django-rest-framework | tests/test_serializer.py | {
"start": 28314,
"end": 28856
} | class ____:
# Serializer.set_value() modifies the first parameter in-place.
s = serializers.Serializer()
def test_no_keys(self):
ret = {'a': 1}
self.s.set_value(ret, [], {'b': 2})
assert ret == {'a': 1, 'b': 2}
def test_one_key(self):
ret = {'a': 1}
self.s.set_value(ret, ['x'], 2)
assert ret == {'a': 1, 'x': 2}
def test_nested_key(self):
ret = {'a': 1}
self.s.set_value(ret, ['x', 'y'], 2)
assert ret == {'a': 1, 'x': {'y': 2}}
| TestSetValueMethod |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/slicing_router.py | {
"start": 685,
"end": 829
} | class ____(Exception):
"""
Exception raised when the configuration for the SlicingRouter is invalid.
"""
| SlicingConfigurationException |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/lsp.py | {
"start": 1347,
"end": 1423
} | class ____:
NONE = 0
FULL = 1
INCREMENTAL = 2
| TextDocumentSyncKind |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 164236,
"end": 170776
} | class ____:
# Preserving original test cases.
# Recomputed statistics and p-values with R t.test, e.g.
# options(digits=16)
# t.test(c(-1., 0., 1.), mu=2)
X1 = [-1., 0., 1.]
X2 = [0., 1., 2.]
T1_0 = 0.
P1_0 = 1.
T1_1 = -1.7320508075689
P1_1 = 0.2254033307585
T1_2 = -3.4641016151378
P1_2 = 0.07417990022745
T2_0 = 1.7320508075689
P2_0 = 0.2254033307585
P1_1_l = P1_1 / 2
P1_1_g = 1 - (P1_1 / 2)
@pytest.mark.filterwarnings("ignore:divide by zero encountered:RuntimeWarning:dask")
@pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask")
def test_onesample(self, xp):
with warnings.catch_warnings(), \
np.errstate(invalid="ignore", divide="ignore"):
warnings.filterwarnings(
"ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)
a = xp.asarray(4.) if not is_numpy(xp) else 4.
t, p = stats.ttest_1samp(a, 3.)
xp_assert_equal(t, xp.asarray(xp.nan))
xp_assert_equal(p, xp.asarray(xp.nan))
t, p = stats.ttest_1samp(xp.asarray(self.X1), 0.)
xp_assert_close(t, xp.asarray(self.T1_0))
xp_assert_close(p, xp.asarray(self.P1_0))
res = stats.ttest_1samp(xp.asarray(self.X1), 0.)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, xp=xp)
t, p = stats.ttest_1samp(xp.asarray(self.X2), 0.)
xp_assert_close(t, xp.asarray(self.T2_0))
xp_assert_close(p, xp.asarray(self.P2_0))
t, p = stats.ttest_1samp(xp.asarray(self.X1), 1.)
xp_assert_close(t, xp.asarray(self.T1_1))
xp_assert_close(p, xp.asarray(self.P1_1))
t, p = stats.ttest_1samp(xp.asarray(self.X1), 2.)
xp_assert_close(t, xp.asarray(self.T1_2))
xp_assert_close(p, xp.asarray(self.P1_2))
def test_onesample_nan_policy_propagate(self, xp):
x = stats.norm.rvs(loc=5, scale=10, size=51, random_state=7654567)
x[50] = np.nan
x = xp.asarray(x, dtype=xp_default_dtype(xp))
res = stats.ttest_1samp(x, 5.0)
xp_assert_equal(res.statistic, xp.asarray(xp.nan))
xp_assert_equal(res.pvalue, xp.asarray(xp.nan))
@skip_xp_backends(eager_only=True, reason="lazy arrays don't do 'raise'.")
def test_onesample_nan_policy_omit_raise(self, xp):
x = stats.norm.rvs(loc=5, scale=10, size=51, random_state=7654567)
x[50] = np.nan
x = xp.asarray(x, dtype=xp_default_dtype(xp))
res = stats.ttest_1samp(x, 5.0, nan_policy='omit')
xp_assert_close(res.statistic, xp.asarray(-1.6412624074367159))
xp_assert_close(res.pvalue, xp.asarray(0.107147027334048005))
with pytest.raises(ValueError, match="The input contains nan values"):
stats.ttest_1samp(x, 5.0, nan_policy='raise')
with pytest.raises(ValueError, match="nan_policy must be one of"):
stats.ttest_1samp(x, 5.0, nan_policy='foobar')
@pytest.mark.filterwarnings("ignore:divide by zero encountered in divide")
def test_1samp_alternative(self, xp):
message = "`alternative` must be 'less', 'greater', or 'two-sided'."
with pytest.raises(ValueError, match=message):
stats.ttest_1samp(xp.asarray(self.X1), 0., alternative="error")
t, p = stats.ttest_1samp(xp.asarray(self.X1), 1., alternative="less")
xp_assert_close(p, xp.asarray(self.P1_1_l))
xp_assert_close(t, xp.asarray(self.T1_1))
t, p = stats.ttest_1samp(xp.asarray(self.X1), 1., alternative="greater")
xp_assert_close(p, xp.asarray(self.P1_1_g))
xp_assert_close(t, xp.asarray(self.T1_1))
@skip_xp_backends('jax.numpy', reason='Generic stdtrit mutates array.')
@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
def test_1samp_ci_1d(self, xp, alternative):
# test confidence interval method against reference values
rng = np.random.default_rng(8066178009154342972)
n = 10
x = rng.normal(size=n, loc=1.5, scale=2)
popmean = rng.normal() # this shouldn't affect confidence interval
# Reference values generated with R t.test:
# options(digits=16)
# x = c(2.75532884, 0.93892217, 0.94835861, 1.49489446, -0.62396595,
# -1.88019867, -1.55684465, 4.88777104, 5.15310979, 4.34656348)
# t.test(x, conf.level=0.85, alternative='l')
dtype = xp.asarray(1.0).dtype
x = xp.asarray(x, dtype=dtype)
popmean = xp.asarray(popmean, dtype=dtype)
ref = {'two-sided': [0.3594423211709136, 2.9333455028290860],
'greater': [0.7470806207371626, np.inf],
'less': [-np.inf, 2.545707203262837]}
res = stats.ttest_1samp(x, popmean=popmean, alternative=alternative)
ci = res.confidence_interval(confidence_level=0.85)
xp_assert_close(ci.low, xp.asarray(ref[alternative][0]))
xp_assert_close(ci.high, xp.asarray(ref[alternative][1]))
xp_assert_equal(res.df, xp.asarray(n-1))
def test_1samp_ci_iv(self, xp):
# test `confidence_interval` method input validation
res = stats.ttest_1samp(xp.arange(10.), 0.)
message = '`confidence_level` must be a number between 0 and 1.'
with pytest.raises(ValueError, match=message):
res.confidence_interval(confidence_level=10)
@skip_xp_backends(np_only=True, reason='Too slow.')
@pytest.mark.xslow
@hypothesis.given(alpha=hypothesis.strategies.floats(1e-15, 1-1e-15),
data_axis=ttest_data_axis_strategy())
@pytest.mark.parametrize('alternative', ['less', 'greater'])
def test_pvalue_ci(self, alpha, data_axis, alternative, xp):
# test relationship between one-sided p-values and confidence intervals
data, axis = data_axis
data = xp.asarray(data)
res = stats.ttest_1samp(data, 0.,
alternative=alternative, axis=axis)
l, u = res.confidence_interval(confidence_level=alpha)
popmean = l if alternative == 'greater' else u
popmean = xp.expand_dims(popmean, axis=axis)
res = stats.ttest_1samp(data, popmean, alternative=alternative, axis=axis)
shape = list(data.shape)
shape.pop(axis)
# `float64` is used to correspond with extreme range of `alpha`
ref = xp.broadcast_to(xp.asarray(1-alpha, dtype=xp.float64), shape)
xp_assert_close(res.pvalue, ref)
| TestStudentTest |
python | realpython__materials | django-pagination/terms/views.py | {
"start": 287,
"end": 1344
} | class ____(ListView):
paginate_by = 5
model = Keyword
def listing(request, page):
keywords = Keyword.objects.all().order_by("name")
paginator = Paginator(keywords, 2)
page_object = paginator.get_page(page)
page_object.adjusted_elided_pages = paginator.get_elided_page_range(page)
context = {"page_obj": page_object}
return render(request, "terms/keyword_list.html", context)
def listing_api(request):
page_number = request.GET.get("page", 1)
per_page = request.GET.get("per_page", 2)
startswith = request.GET.get("startswith", "")
keywords = Keyword.objects.filter(name__startswith=startswith)
paginator = Paginator(keywords, per_page)
page_obj = paginator.get_page(page_number)
data = [{"name": kw.name} for kw in page_obj.object_list]
payload = {
"page": {
"current": page_obj.number,
"has_next": page_obj.has_next(),
"has_previous": page_obj.has_previous(),
},
"data": data,
}
return JsonResponse(payload)
| KeywordListView |
python | OmkarPathak__pygorithm | tests/test_backtracking.py | {
"start": 2706,
"end": 4038
} | class ____(unittest.TestCase):
def test_kmp_search(self):
"""Test KMP string search"""
from pygorithm.strings import kmp_search
text = "ABABDABACDABABCABCABCABCABC"
pattern = "ABABCABCABCABC"
matches = kmp_search.kmp_search(text, pattern)
self.assertGreater(len(matches), 0)
# Test first occurrence - "ABAD" appears at index 2 in "ABABDABACDABABCABCABCABCABC"
first_match = kmp_search.kmp_search_first(text, "ABAD")
# Let's check what the actual result is and fix the test
expected_index = text.find("ABAD") # Use Python's built-in to verify
self.assertEqual(first_match, expected_index)
def test_edit_distance(self):
"""Test edit distance algorithm"""
from pygorithm.strings import edit_distance
# Test basic edit distance
dist = edit_distance.edit_distance("kitten", "sitting")
self.assertEqual(dist, 3)
# Test similarity ratio
ratio = edit_distance.similarity_ratio("hello", "hello")
self.assertEqual(ratio, 1.0)
# Test one edit away
self.assertTrue(edit_distance.is_one_edit_away("cat", "bat"))
self.assertFalse(edit_distance.is_one_edit_away("cat", "dog"))
| TestNewStringAlgorithms |
python | realpython__materials | wordcount/tests/fixtures.py | {
"start": 295,
"end": 1021
} | class ____:
content: bytes
counts: tuple[int, ...]
@cached_property
def path(self) -> Path:
return Path("-")
def format_line(self, max_digits=None, selected=None):
if selected is None:
selected = 8 + 4 + 1
numbers = [
self.counts[i] for i in range(4) if selected & (2 ** (3 - i))
]
if max_digits is None:
max_digits = len(str(max(numbers)))
counts = " ".join(
filter(None, [f"{number:{max_digits}}" for number in numbers])
)
if self.path.name == "-":
return f"{counts}\n".encode("utf-8")
else:
return f"{counts} {self.path}\n".encode("utf-8")
@dataclass
| FakeFile |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/constants_ast.py | {
"start": 3579,
"end": 3754
} | class ____(Exception):
# a control flow exception which we raise in ConstantsVisitor when the
# number of constants in a module gets too large.
pass
| TooManyConstants |
python | apache__airflow | providers/standard/tests/unit/standard/operators/test_hitl.py | {
"start": 19872,
"end": 23213
} | class ____:
def test_init_with_options(self) -> None:
with pytest.raises(ValueError, match="Passing options to ApprovalOperator is not allowed."):
ApprovalOperator(
task_id="hitl_test",
subject="This is subject",
body="This is body",
options=["1", "2", "3", "4", "5"],
params={"input": 1},
)
def test_init_with_multiple_set_to_true(self) -> None:
with pytest.raises(ValueError, match="Passing multiple to ApprovalOperator is not allowed."):
ApprovalOperator(
task_id="hitl_test",
subject="This is subject",
params={"input": 1},
multiple=True,
)
def test_execute_complete(self) -> None:
hitl_op = ApprovalOperator(
task_id="hitl_test",
subject="This is subject",
)
responded_at_dt = timezone.utcnow()
ret = hitl_op.execute_complete(
context={},
event={
"chosen_options": ["Approve"],
"params_input": {},
"responded_at": responded_at_dt,
"responded_by_user": {"id": "test", "name": "test"},
},
)
assert ret == {
"chosen_options": ["Approve"],
"params_input": {},
"responded_at": responded_at_dt,
"responded_by_user": {"id": "test", "name": "test"},
}
def test_execute_complete_with_downstream_tasks(
self, dag_maker: DagMaker, get_context_from_model_ti: Any
) -> None:
with dag_maker("hitl_test_dag", serialized=True):
hitl_op = ApprovalOperator(
task_id="hitl_test",
subject="This is subject",
)
(hitl_op >> EmptyOperator(task_id="op1"))
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("hitl_test")
with pytest.raises(DownstreamTasksSkipped) as exc_info:
hitl_op.execute_complete(
context=get_context_from_model_ti(ti),
event={
"chosen_options": ["Reject"],
"params_input": {},
"responded_at": timezone.utcnow(),
"responded_by_user": {"id": "test", "name": "test"},
},
)
assert set(exc_info.value.tasks) == {"op1"}
def test_execute_complete_with_fail_on_reject_set_to_true(
self, dag_maker: DagMaker, get_context_from_model_ti: Any
) -> None:
with dag_maker("hitl_test_dag", serialized=True):
hitl_op = ApprovalOperator(task_id="hitl_test", subject="This is subject", fail_on_reject=True)
(hitl_op >> EmptyOperator(task_id="op1"))
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("hitl_test")
with pytest.raises(HITLRejectException):
hitl_op.execute_complete(
context=get_context_from_model_ti(ti),
event={
"chosen_options": ["Reject"],
"params_input": {},
"responded_at": timezone.utcnow(),
"responded_by_user": {"id": "test", "name": "test"},
},
)
| TestApprovalOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1027407,
"end": 1028138
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateEnterpriseRepositoryProjectsSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
"""The enterprise with the updated repository projects setting."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the repository
projects setting.
"""
| UpdateEnterpriseRepositoryProjectsSettingPayload |
python | django-guardian__django-guardian | guardian/testapp/tests/test_shortcuts.py | {
"start": 13615,
"end": 27431
} | class ____(TestCase):
"""
Tests get_users_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(model="foo", app_label="guardian-tests")
self.obj2 = ContentType.objects.create(model="bar", app_label="guardian-tests")
self.user1 = User.objects.create(username="user1")
self.user2 = User.objects.create(username="user2")
self.user3 = User.objects.create(username="user3")
self.group1 = Group.objects.create(name="group1")
self.group2 = Group.objects.create(name="group2")
self.group3 = Group.objects.create(name="group3")
def test_empty(self):
result = get_users_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertEqual(list(result), [])
result = get_users_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user3, self.obj2)
result = get_users_with_perms(self.obj1)
result_vals = result.values_list("username", flat=True)
self.assertEqual(
set(result_vals),
{user.username for user in (self.user1, self.user2)},
)
def test_only_with_perms_in_groups(self):
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
result = get_groups_with_perms(self.obj1, only_with_perms_in=("change_contenttype",))
result_vals = result.values_list("name", flat=True)
self.assertEqual(
set(result_vals),
{self.group1.name},
)
def test_only_with_perms_in_groups_attached(self):
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
result = get_groups_with_perms(self.obj1, only_with_perms_in=("delete_contenttype",), attach_perms=True)
expected = {self.group2: ("change_contenttype", "delete_contenttype")}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
def test_only_with_perms_in_users(self):
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user3, self.obj2)
result = get_users_with_perms(self.obj1, only_with_perms_in=("change_contenttype",))
result_vals = result.values_list("username", flat=True)
self.assertEqual(
set(result_vals),
{self.user1.username},
)
def test_only_with_perms_in_users_with_group_users(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
# assign perms to groups
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("add_contenttype", self.group3, self.obj2)
result = get_users_with_perms(
self.obj1, only_with_perms_in=("change_contenttype", "delete_contenttype"), with_group_users=True
)
result_vals = result.values_list("username", flat=True)
self.assertEqual(
set(result_vals),
{self.user1.username, self.user2.username},
)
def test_only_with_perms_in_users_without_group_users(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
# assign perms to groups
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
# assign perms to user
assign_perm("change_contenttype", self.user2, self.obj1)
result = get_users_with_perms(
self.obj1, only_with_perms_in=("change_contenttype", "delete_contenttype"), with_group_users=False
)
result_vals = result.values_list("username", flat=True)
self.assertEqual(
set(result_vals),
{self.user2.username},
)
def test_only_with_perms_in_users_attached(self):
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user3, self.obj2)
result = get_users_with_perms(self.obj1, only_with_perms_in=("delete_contenttype",), attach_perms=True)
expected = {self.user2: ("change_contenttype", "delete_contenttype")}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
def test_users_groups_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
result = get_users_with_perms(self.obj1).values_list("pk", flat=True)
self.assertEqual(set(result), {u.pk for u in (self.user1, self.user2)})
def test_users_groups_after_removal(self):
self.test_users_groups_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1).values_list("pk", flat=True)
self.assertEqual(
set(result),
{self.user2.pk},
)
def test_attach_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.user3, self.obj2)
# Check contenttype1
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {
self.user1: ["change_contenttype"],
self.user2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
# Check contenttype2
result = get_users_with_perms(self.obj2, attach_perms=True)
expected = {
self.user3: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
def test_attach_groups_only_has_perms(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {self.user1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_mixed(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj2)
assign_perm("change_contenttype", self.user3, self.obj2)
assign_perm("change_%s" % user_module_name, self.user3, self.user1)
result = get_users_with_perms(self.obj1)
self.assertEqual(
set(result),
{self.user1, self.user2},
)
def test_with_superusers(self):
admin = User.objects.create(username="admin", is_superuser=True)
assign_perm("change_contenttype", self.user1, self.obj1)
result = get_users_with_perms(self.obj1, with_superusers=True)
self.assertEqual(
set(result),
{self.user1, admin},
)
def test_without_group_users(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False)
expected = {self.user2}
self.assertEqual(set(result), expected)
def test_without_group_users_but_perms_attached(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False, attach_perms=True)
expected = {self.user2: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_direct_perms_only(self):
admin = User.objects.create(username="admin", is_superuser=True)
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group1)
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("delete_contenttype", admin, self.obj1)
assign_perm("delete_contenttype", self.group1, self.obj1)
expected = {self.user1, self.user2, admin}
result = get_users_with_perms(self.obj1, with_superusers=False, with_group_users=True)
self.assertEqual(set(result), expected)
self.assertEqual(set(get_user_perms(self.user1, self.obj1)), {"change_contenttype"})
self.assertEqual(set(get_user_perms(self.user2, self.obj1)), set())
self.assertEqual(set(get_user_perms(admin, self.obj1)), {"delete_contenttype"})
result = get_users_with_perms(self.obj1, with_superusers=False, with_group_users=False)
expected = {self.user1, admin}
self.assertEqual(set(result), expected)
self.assertEqual(set(get_group_perms(self.user1, self.obj1)), {"delete_contenttype"})
self.assertEqual(set(get_group_perms(self.user2, self.obj1)), {"delete_contenttype"})
self.assertEqual(set(get_group_perms(self.group1, self.obj1)), {"delete_contenttype"})
self.assertEqual(set(get_group_perms(self.group2, self.obj1)), set())
self.assertEqual(set(get_group_perms(admin, self.obj1)), set())
expected_permissions = ["add_contenttype", "change_contenttype", "delete_contenttype"]
expected_permissions.append("view_contenttype")
self.assertEqual(set(get_perms(admin, self.obj1)), set(expected_permissions))
self.assertEqual(set(get_perms(self.user1, self.obj1)), {"change_contenttype", "delete_contenttype"})
self.assertEqual(set(get_perms(self.user2, self.obj1)), {"delete_contenttype"})
self.assertEqual(set(get_perms(self.group1, self.obj1)), {"delete_contenttype"})
self.assertEqual(set(get_perms(self.group2, self.obj1)), set())
def test_direct_perms_only_perms_attached(self):
admin = User.objects.create(username="admin", is_superuser=True)
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group1)
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("delete_contenttype", admin, self.obj1)
assign_perm("delete_contenttype", self.group1, self.obj1)
expected = {
self.user1: ["change_contenttype", "delete_contenttype"],
admin: ["add_contenttype", "change_contenttype", "delete_contenttype"],
self.user2: ["delete_contenttype"],
}
expected[admin].append("view_contenttype")
result = get_users_with_perms(self.obj1, attach_perms=True, with_superusers=False, with_group_users=True)
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
result = get_users_with_perms(self.obj1, attach_perms=True, with_superusers=False, with_group_users=False)
expected = {self.user1: ["change_contenttype"], admin: ["delete_contenttype"]}
self.assertEqual(result, expected)
def test_without_group_users_no_result(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True, with_group_users=False)
expected = {}
self.assertEqual(result, expected)
def test_without_group_users_no_result_but_with_superusers(self):
admin = User.objects.create(username="admin", is_superuser=True)
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False, with_superusers=True)
expected = [admin]
self.assertEqual(set(result), set(expected))
| GetUsersWithPermsTest |
python | has2k1__plotnine | plotnine/scales/scale_manual.py | {
"start": 317,
"end": 1400
} | class ____(scale_discrete):
"""
Abstract class for manual scales
"""
values: InitVar[Sequence[Any] | dict[Any, Any]]
"""
Exact values the scale should map to.
"""
def __post_init__(self, values):
from collections.abc import Iterable, Sized
super().__post_init__()
if (
isinstance(self.breaks, Iterable)
and isinstance(self.breaks, Sized)
and len(self.breaks) == len(values)
and not isinstance(values, Mapping)
):
values = dict(zip(self.breaks, values))
def palette(n):
max_n = len(values)
if n > max_n:
msg = (
f"The palette of {self.__class__.__name__} can return "
f"a maximum of {max_n} values. {n} were requested "
f"from it."
)
warn(msg, PlotnineWarning)
return values
# manual scales have a unique palette that return
self.palette = palette # type: ignore
@dataclass
| _scale_manual |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 43390,
"end": 44116
} | class ____:
"""
Mix-in for all input elements (input, select, and textarea)
"""
@property
def name(self):
"""
Get/set the name of the element
"""
return self.get('name')
@name.setter
def name(self, value):
self.set('name', value)
@name.deleter
def name(self):
attrib = self.attrib
if 'name' in attrib:
del attrib['name']
def __repr__(self):
type_name = getattr(self, 'type', None)
if type_name:
type_name = ' type=%r' % type_name
else:
type_name = ''
return '<%s %x name=%r%s>' % (
self.__class__.__name__, id(self), self.name, type_name)
| InputMixin |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 104889,
"end": 105632
} | class ____(TestCase):
def test_basic(self):
for size, expected in [
(0, []),
(1, [(True, True, '0')]),
(2, [(True, False, '0'), (False, True, '1')]),
(3, [(True, False, '0'), (False, False, '1'), (False, True, '2')]),
(
4,
[
(True, False, '0'),
(False, False, '1'),
(False, False, '2'),
(False, True, '3'),
],
),
]:
with self.subTest(size=size):
iterable = map(str, range(size))
actual = list(mi.mark_ends(iterable))
self.assertEqual(actual, expected)
| MarkEndsTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass16.py | {
"start": 200,
"end": 270
} | class ____(C):
def __init__(self, x: int):
pass
@dataclass
| B |
python | wandb__wandb | wandb/automations/_generated/create_automation.py | {
"start": 220,
"end": 302
} | class ____(GQLResult):
result: Optional[CreateAutomationResult]
| CreateAutomation |
python | sphinx-doc__sphinx | sphinx/ext/todo.py | {
"start": 2801,
"end": 3218
} | class ____(SphinxDirective):
"""A list of all todo entries."""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
# Simply insert an empty todolist node which will be replaced later
# when process_todo_nodes is called
return [todolist('')]
| TodoList |
python | django__django | tests/admin_views/admin.py | {
"start": 29157,
"end": 29323
} | class ____(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return "/worker_inline/%s/%s/" % (obj.surname, obj.name)
| WorkerInlineAdmin |
python | spack__spack | lib/spack/spack/environment/list.py | {
"start": 10492,
"end": 10604
} | class ____(SpecListError):
"""Error class for undefined references in Spack stacks."""
| UndefinedReferenceError |
python | davidhalter__parso | parso/python/errors.py | {
"start": 25415,
"end": 25683
} | class ____(SyntaxRule):
message = "import * only allowed at module level"
def is_issue(self, node):
return node.is_star_import() and self._normalizer.context.parent_context is not None
@ErrorFinder.register_rule(type='import_from')
| _ImportStarInFunction |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 16693,
"end": 23811
} | class ____:
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
In the details, the decorator adds a 'parametrize_fn' property to the test function. This function
is intended to be called later by one of:
* Device-specific test instantiation via instantiate_device_type_tests(). Note that for this
case there is no need to explicitly parametrize over device type, as that is handled separately.
* Device-agnostic parametrized test instantiation via instantiate_parametrized_tests().
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 4-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
decorator_fn (callable): Callable[[Dict], List] for list of decorators to apply given param_kwargs
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn)
else:
fn.parametrize_fn = self._parametrize_test
return fn
def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn):
"""
Returns a parametrize_fn that parametrizes over the product of the parameters handled
by the given parametrize_fns. Each given parametrize_fn should each have the signature
f(test, generic_cls, device_cls).
The test names will be a combination of the names produced by the parametrize_fns in
"<new_name>_<old_name>" order. This order is done to match intuition for constructed names
when composing multiple decorators; the names will be built in top to bottom order when stacking
parametrization decorators.
Args:
old_parametrize_fn (callable) - First parametrize_fn to compose.
new_parametrize_fn (callable) - Second parametrize_fn to compose.
"""
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = list(old_parametrize_fn(test, generic_cls, device_cls))
for (old_test, old_test_name, old_param_kwargs, old_dec_fn) in old_tests:
for (new_test, new_test_name, new_param_kwargs, new_dec_fn) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys())
if redundant_params:
raise RuntimeError('Parametrization over the same parameter by multiple parametrization '
f'decorators is not supported. For test "{test.__name__}", the following parameters '
f'are handled multiple times: {redundant_params}')
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
merged_test_name = '{}{}{}'.format(new_test_name,
'_' if old_test_name != '' and new_test_name != '' else '',
old_test_name)
def merged_decorator_fn(param_kwargs, old_dec_fn=old_dec_fn, new_dec_fn=new_dec_fn):
return list(old_dec_fn(param_kwargs)) + list(new_dec_fn(param_kwargs))
yield (new_test, merged_test_name, full_param_kwargs, merged_decorator_fn)
return composite_fn
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names. This should be used instead of
instantiate_device_type_tests() if the test class contains device-agnostic tests.
You can also use it as a class decorator. E.g.
```
@instantiate_parametrized_tests
class TestFoo(TestCase):
...
```
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), f"Redefinition of test {name}"
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs, decorator_fn) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = f'{test.__name__}_{test_suffix}'
# Apply decorators based on full param kwargs.
for decorator in decorator_fn(param_kwargs):
test = decorator(test)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
return generic_cls
| _TestParametrizer |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 200752,
"end": 206199
} | class ____(GeneratedAirbyteSource):
class HTTPSPublicWeb:
@public
def __init__(self, user_agent: Optional[bool] = None):
self.storage = "HTTPS"
self.user_agent = check.opt_bool_param(user_agent, "user_agent")
class GCSGoogleCloudStorage:
@public
def __init__(self, service_account_json: Optional[str] = None):
self.storage = "GCS"
self.service_account_json = check.opt_str_param(
service_account_json, "service_account_json"
)
class S3AmazonWebServices:
@public
def __init__(
self,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
):
self.storage = "S3"
self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")
self.aws_secret_access_key = check.opt_str_param(
aws_secret_access_key, "aws_secret_access_key"
)
class AzBlobAzureBlobStorage:
@public
def __init__(
self,
storage_account: str,
sas_token: Optional[str] = None,
shared_key: Optional[str] = None,
):
self.storage = "AzBlob"
self.storage_account = check.str_param(storage_account, "storage_account")
self.sas_token = check.opt_str_param(sas_token, "sas_token")
self.shared_key = check.opt_str_param(shared_key, "shared_key")
class SSHSecureShell:
@public
def __init__(
self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None
):
self.storage = "SSH"
self.user = check.str_param(user, "user")
self.password = check.opt_str_param(password, "password")
self.host = check.str_param(host, "host")
self.port = check.opt_str_param(port, "port")
class SCPSecureCopyProtocol:
@public
def __init__(
self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None
):
self.storage = "SCP"
self.user = check.str_param(user, "user")
self.password = check.opt_str_param(password, "password")
self.host = check.str_param(host, "host")
self.port = check.opt_str_param(port, "port")
class SFTPSecureFileTransferProtocol:
@public
def __init__(
self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None
):
self.storage = "SFTP"
self.user = check.str_param(user, "user")
self.password = check.opt_str_param(password, "password")
self.host = check.str_param(host, "host")
self.port = check.opt_str_param(port, "port")
@public
def __init__(
self,
name: str,
dataset_name: str,
format: str,
url: str,
provider: Union[
"FileSecureSource.HTTPSPublicWeb",
"FileSecureSource.GCSGoogleCloudStorage",
"FileSecureSource.S3AmazonWebServices",
"FileSecureSource.AzBlobAzureBlobStorage",
"FileSecureSource.SSHSecureShell",
"FileSecureSource.SCPSecureCopyProtocol",
"FileSecureSource.SFTPSecureFileTransferProtocol",
],
reader_options: Optional[str] = None,
):
"""Airbyte Source for File Secure.
Documentation can be found at https://docs.airbyte.com/integrations/sources/file
Args:
name (str): The name of the destination.
dataset_name (str): The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only).
format (str): The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).
reader_options (Optional[str]): This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
url (str): The URL path to access the file which should be replicated.
provider (Union[FileSecureSource.HTTPSPublicWeb, FileSecureSource.GCSGoogleCloudStorage, FileSecureSource.S3AmazonWebServices, FileSecureSource.AzBlobAzureBlobStorage, FileSecureSource.SSHSecureShell, FileSecureSource.SCPSecureCopyProtocol, FileSecureSource.SFTPSecureFileTransferProtocol]): The storage Provider or Location of the file(s) which should be replicated.
"""
self.dataset_name = check.str_param(dataset_name, "dataset_name")
self.format = check.str_param(format, "format")
self.reader_options = check.opt_str_param(reader_options, "reader_options")
self.url = check.str_param(url, "url")
self.provider = check.inst_param(
provider,
"provider",
(
FileSecureSource.HTTPSPublicWeb,
FileSecureSource.GCSGoogleCloudStorage,
FileSecureSource.S3AmazonWebServices,
FileSecureSource.AzBlobAzureBlobStorage,
FileSecureSource.SSHSecureShell,
FileSecureSource.SCPSecureCopyProtocol,
FileSecureSource.SFTPSecureFileTransferProtocol,
),
)
super().__init__("File Secure", name)
| FileSecureSource |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 223669,
"end": 225591
} | class ____(MemoryCopyNode):
"""
Assign a scalar to a slice. dst must be simple, scalar will be assigned
to a correct type and not just something assignable.
memslice1[...] = 0.0
memslice1[:] = 0.0
"""
def __init__(self, pos, dst):
super().__init__(pos, dst)
self.type = dst.type.dtype
def _generate_assignment_code(self, scalar, code):
from . import MemoryView
self.dst.type.assert_direct_dims(self.dst.pos)
dtype = self.dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = self.dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if self.dst.result_in_temp() or self.dst.is_simple():
dst_temp = self.dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
dst_temp = "__pyx_temp_slice"
force_strided = False
indices = self.dst.original_indices
for idx in indices:
if isinstance(idx, SliceNode) and not (idx.start.is_none and
idx.stop.is_none and
idx.step.is_none):
force_strided = True
slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
self.dst.type.ndim, code,
force_strided=force_strided)
p = slice_iter_obj.start_loops()
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
slice_iter_obj.end_loops()
code.end_block()
| MemoryCopyScalar |
python | pytest-dev__pluggy | testing/test_hookcaller.py | {
"start": 724,
"end": 12289
} | class ____:
def __init__(self, hc: HookCaller) -> None:
self.hc = hc
def __call__(
self,
tryfirst: bool = False,
trylast: bool = False,
hookwrapper: bool = False,
wrapper: bool = False,
) -> Callable[[FuncT], FuncT]:
def wrap(func: FuncT) -> FuncT:
hookimpl(
tryfirst=tryfirst,
trylast=trylast,
hookwrapper=hookwrapper,
wrapper=wrapper,
)(func)
self.hc._add_hookimpl(
HookImpl(None, "<temp>", func, func.example_impl), # type: ignore[attr-defined]
)
return func
return wrap
@pytest.fixture
def addmeth(hc: HookCaller) -> AddMeth:
return AddMeth(hc)
def funcs(hookmethods: Sequence[HookImpl]) -> list[Callable[..., object]]:
return [hookmethod.function for hookmethod in hookmethods]
def test_adding_nonwrappers(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1() -> None:
pass
@addmeth()
def he_method2() -> None:
pass
@addmeth()
def he_method3() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1, he_method2, he_method3]
def test_adding_nonwrappers_trylast(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth(trylast=True)
def he_method1() -> None:
pass
@addmeth()
def he_method1_b() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1, he_method1_middle, he_method1_b]
def test_adding_nonwrappers_trylast3(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1_a() -> None:
pass
@addmeth(trylast=True)
def he_method1_b() -> None:
pass
@addmeth()
def he_method1_c() -> None:
pass
@addmeth(trylast=True)
def he_method1_d() -> None:
pass
assert funcs(hc.get_hookimpls()) == [
he_method1_d,
he_method1_b,
he_method1_a,
he_method1_c,
]
def test_adding_nonwrappers_trylast2(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth()
def he_method1_b() -> None:
pass
@addmeth(trylast=True)
def he_method1() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1, he_method1_middle, he_method1_b]
def test_adding_nonwrappers_tryfirst(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth(tryfirst=True)
def he_method1() -> None:
pass
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth()
def he_method1_b() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1_middle, he_method1_b, he_method1]
def test_adding_wrappers_ordering(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth(hookwrapper=True)
def he_method1():
yield # pragma: no cover
@addmeth(wrapper=True)
def he_method1_fun():
yield # pragma: no cover
@addmeth()
def he_method1_middle():
return # pragma: no cover
@addmeth(hookwrapper=True)
def he_method3_fun():
yield # pragma: no cover
@addmeth(hookwrapper=True)
def he_method3():
yield # pragma: no cover
assert funcs(hc.get_hookimpls()) == [
he_method1_middle,
he_method1,
he_method1_fun,
he_method3_fun,
he_method3,
]
def test_adding_wrappers_ordering_tryfirst(hc: HookCaller, addmeth: AddMeth) -> None:
@addmeth(hookwrapper=True, tryfirst=True)
def he_method1():
yield # pragma: no cover
@addmeth(hookwrapper=True)
def he_method2():
yield # pragma: no cover
@addmeth(wrapper=True, tryfirst=True)
def he_method3():
yield # pragma: no cover
assert funcs(hc.get_hookimpls()) == [he_method2, he_method1, he_method3]
def test_adding_wrappers_complex(hc: HookCaller, addmeth: AddMeth) -> None:
assert funcs(hc.get_hookimpls()) == []
@addmeth(hookwrapper=True, trylast=True)
def m1():
yield # pragma: no cover
assert funcs(hc.get_hookimpls()) == [m1]
@addmeth()
def m2() -> None: ...
assert funcs(hc.get_hookimpls()) == [m2, m1]
@addmeth(trylast=True)
def m3() -> None: ...
assert funcs(hc.get_hookimpls()) == [m3, m2, m1]
@addmeth(hookwrapper=True)
def m4() -> None: ...
assert funcs(hc.get_hookimpls()) == [m3, m2, m1, m4]
@addmeth(wrapper=True, tryfirst=True)
def m5():
yield # pragma: no cover
assert funcs(hc.get_hookimpls()) == [m3, m2, m1, m4, m5]
@addmeth(tryfirst=True)
def m6() -> None: ...
assert funcs(hc.get_hookimpls()) == [m3, m2, m6, m1, m4, m5]
@addmeth()
def m7() -> None: ...
assert funcs(hc.get_hookimpls()) == [m3, m2, m7, m6, m1, m4, m5]
@addmeth(wrapper=True)
def m8():
yield # pragma: no cover
assert funcs(hc.get_hookimpls()) == [m3, m2, m7, m6, m1, m4, m8, m5]
@addmeth(trylast=True)
def m9() -> None: ...
assert funcs(hc.get_hookimpls()) == [m9, m3, m2, m7, m6, m1, m4, m8, m5]
@addmeth(tryfirst=True)
def m10() -> None: ...
assert funcs(hc.get_hookimpls()) == [m9, m3, m2, m7, m6, m10, m1, m4, m8, m5]
@addmeth(hookwrapper=True, trylast=True)
def m11() -> None: ...
assert funcs(hc.get_hookimpls()) == [m9, m3, m2, m7, m6, m10, m11, m1, m4, m8, m5]
@addmeth(wrapper=True)
def m12():
yield # pragma: no cover
assert funcs(hc.get_hookimpls()) == [
m9,
m3,
m2,
m7,
m6,
m10,
m11,
m1,
m4,
m8,
m12,
m5,
]
@addmeth()
def m13() -> None: ...
assert funcs(hc.get_hookimpls()) == [
m9,
m3,
m2,
m7,
m13,
m6,
m10,
m11,
m1,
m4,
m8,
m12,
m5,
]
def test_hookspec(pm: PluginManager) -> None:
class HookSpec:
@hookspec()
def he_myhook1(arg1) -> None:
pass
@hookspec(firstresult=True)
def he_myhook2(arg1) -> None:
pass
@hookspec(firstresult=False)
def he_myhook3(arg1) -> None:
pass
pm.add_hookspecs(HookSpec)
assert pm.hook.he_myhook1.spec is not None
assert not pm.hook.he_myhook1.spec.opts["firstresult"]
assert pm.hook.he_myhook2.spec is not None
assert pm.hook.he_myhook2.spec.opts["firstresult"]
assert pm.hook.he_myhook3.spec is not None
assert not pm.hook.he_myhook3.spec.opts["firstresult"]
@pytest.mark.parametrize("name", ["hookwrapper", "optionalhook", "tryfirst", "trylast"])
@pytest.mark.parametrize("val", [True, False])
def test_hookimpl(name: str, val: bool) -> None:
@hookimpl(**{name: val}) # type: ignore[misc,call-overload]
def he_myhook1(arg1) -> None:
pass
if val:
assert he_myhook1.example_impl.get(name)
else:
assert not hasattr(he_myhook1, name)
def test_hookrelay_registry(pm: PluginManager) -> None:
"""Verify hook caller instances are registered by name onto the relay
and can be likewise unregistered."""
class Api:
@hookspec
def hello(self, arg: object) -> None:
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert repr(hook.hello).find("hello") != -1
class Plugin:
@hookimpl
def hello(self, arg):
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
assert not hasattr(hook, "world")
pm.unregister(plugin)
assert hook.hello(arg=3) == []
def test_hookrelay_registration_by_specname(pm: PluginManager) -> None:
"""Verify hook caller instances may also be registered by specifying a
specname option to the hookimpl"""
class Api:
@hookspec
def hello(self, arg: object) -> None:
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert len(pm.hook.hello.get_hookimpls()) == 0
class Plugin:
@hookimpl(specname="hello")
def foo(self, arg: int) -> int:
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
def test_hookrelay_registration_by_specname_raises(pm: PluginManager) -> None:
"""Verify using specname still raises the types of errors during registration as it
would have without using specname."""
class Api:
@hookspec
def hello(self, arg: object) -> None:
"api hook 1"
pm.add_hookspecs(Api)
# make sure a bad signature still raises an error when using specname
class Plugin:
@hookimpl(specname="hello")
def foo(self, arg: int, too, many, args) -> int:
return arg + 1 # pragma: no cover
with pytest.raises(PluginValidationError):
pm.register(Plugin())
# make sure check_pending still fails if specname doesn't have a
# corresponding spec. EVEN if the function name matches one.
class Plugin2:
@hookimpl(specname="bar")
def hello(self, arg: int) -> int:
return arg + 1 # pragma: no cover
pm.register(Plugin2())
with pytest.raises(PluginValidationError):
pm.check_pending()
def test_hook_conflict(pm: PluginManager) -> None:
class Api1:
@hookspec
def conflict(self) -> None:
"""Api1 hook"""
class Api2:
@hookspec
def conflict(self) -> None:
"""Api2 hook"""
pm.add_hookspecs(Api1)
with pytest.raises(ValueError) as exc:
pm.add_hookspecs(Api2)
assert str(exc.value) == (
"Hook 'conflict' is already registered within namespace "
"<class 'test_hookcaller.test_hook_conflict.<locals>.Api1'>"
)
def test_call_extra_hook_order(hc: HookCaller, addmeth: AddMeth) -> None:
"""Ensure that call_extra is calling hooks in the right order."""
order = []
@addmeth(tryfirst=True)
def method1() -> str:
order.append("1")
return "1"
@addmeth()
def method2() -> str:
order.append("2")
return "2"
@addmeth(trylast=True)
def method3() -> str:
order.append("3")
return "3"
@addmeth(wrapper=True, tryfirst=True)
def method4() -> Generator[None, str, str]:
order.append("4pre")
result = yield
order.append("4post")
return result
@addmeth(wrapper=True)
def method5() -> Generator[None, str, str]:
order.append("5pre")
result = yield
order.append("5post")
return result
@addmeth(wrapper=True, trylast=True)
def method6() -> Generator[None, str, str]:
order.append("6pre")
result = yield
order.append("6post")
return result
def extra1() -> str:
order.append("extra1")
return "extra1"
def extra2() -> str:
order.append("extra2")
return "extra2"
result = hc.call_extra([extra1, extra2], {"arg": "test"})
assert order == [
"4pre",
"5pre",
"6pre",
"1",
"extra2",
"extra1",
"2",
"3",
"6post",
"5post",
"4post",
]
assert result == [
"1",
"extra2",
"extra1",
"2",
"3",
]
| AddMeth |
python | walkccc__LeetCode | solutions/81. Search in Rotated Sorted Array II/81.py | {
"start": 0,
"end": 569
} | class ____:
def search(self, nums: list[int], target: int) -> bool:
l = 0
r = len(nums) - 1
while l <= r:
m = (l + r) // 2
if nums[m] == target:
return True
if nums[l] == nums[m] == nums[r]:
l += 1
r -= 1
elif nums[l] <= nums[m]: # nums[l..m] are sorted
if nums[l] <= target < nums[m]:
r = m - 1
else:
l = m + 1
else: # nums[m..n - 1] are sorted
if nums[m] < target <= nums[r]:
l = m + 1
else:
r = m - 1
return False
| Solution |
python | ray-project__ray | python/ray/train/v2/_internal/state/schema.py | {
"start": 1236,
"end": 2024
} | class ____(str, Enum):
"""Enumeration of the possible statuses for a Train run attempt."""
# ====== Active States ======
# The run attempt is waiting to be scheduled.
PENDING = "PENDING"
# The run attempt is currently in progress.
RUNNING = "RUNNING"
# ===== Terminal States =====
# The run attempt completed successfully.
FINISHED = "FINISHED"
# The run attempt failed due to an error in the training workers.
ERRORED = "ERRORED"
# The run attempt was terminated due to system or controller errors.
ABORTED = "ABORTED"
def is_terminal(self) -> bool:
return self in [
RunAttemptStatus.FINISHED,
RunAttemptStatus.ERRORED,
RunAttemptStatus.ABORTED,
]
@DeveloperAPI
| RunAttemptStatus |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_alloy_db.py | {
"start": 69577,
"end": 73357
} | class ____:
def setup_method(self):
self.operator = AlloyDBDeleteUserOperator(
task_id=TEST_TASK_ID,
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
gcp_conn_id=TEST_GCP_CONN_ID,
request_id=TEST_REQUEST_ID,
validate_request=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_init(self):
assert self.operator.cluster_id == TEST_CLUSTER_ID
assert self.operator.user_id == TEST_USER_ID
def test_template_fields(self):
expected_template_fields = {"user_id", "cluster_id"} | set(AlloyDBWriteBaseOperator.template_fields)
assert set(AlloyDBDeleteUserOperator.template_fields) == expected_template_fields
@mock.patch(DELETE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute(self, mock_hook, mock_log):
mock_delete_user = mock_hook.return_value.delete_user
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_delete_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert result is None
mock_log.info.assert_has_calls(
[
call("Deleting an AlloyDB user."),
call("AlloyDB user %s was successfully removed.", TEST_USER_ID),
]
)
@mock.patch(DELETE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_validate_request(self, mock_hook, mock_log):
mock_delete_user = mock_hook.return_value.delete_user
mock_context = mock.MagicMock()
self.operator.validate_request = True
result = self.operator.execute(context=mock_context)
mock_delete_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
request_id=TEST_REQUEST_ID,
validate_only=True,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert result is None
mock_log.info.assert_called_once_with("Validating a Delete AlloyDB user request.")
@mock.patch(DELETE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_exception(self, mock_hook, mock_log):
mock_delete_user = mock_hook.return_value.delete_user
mock_delete_user.side_effect = Exception
mock_context = mock.MagicMock()
with pytest.raises(AirflowException):
self.operator.execute(context=mock_context)
mock_delete_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_log.info.assert_called_once_with("Deleting an AlloyDB user.")
| TestAlloyDBDeleteUserOperator |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 11,
"end": 144
} | class ____(
Aaaaaaaaaaaaaaaaa,
Bbbbbbbbbbbbbbbb,
DDDDDDDDDDDDDDDD,
EEEEEEEEEEEEEE,
metaclass=meta,
):
pass
| Test |
python | tensorflow__tensorflow | tensorflow/python/keras/combinations.py | {
"start": 2271,
"end": 2753
} | class ____(test_combinations.TestCombination):
"""Combination for Keras test mode.
It by default includes v1_session, v2_eager and v2_tf_function.
"""
def context_managers(self, kwargs):
run_eagerly = kwargs.pop('run_eagerly', None)
if run_eagerly is not None:
return [testing_utils.run_eagerly_scope(run_eagerly)]
else:
return []
def parameter_modifiers(self):
return [test_combinations.OptionalParameter('run_eagerly')]
| KerasModeCombination |
python | wandb__wandb | tests/system_tests/test_functional/test_tensorboard/test_keras_tb_callback.py | {
"start": 233,
"end": 2047
} | class ____(keras.Model):
def build(self, _):
self.dense = keras.layers.Dense(10)
def call(self, x):
outputs = self.dense(x)
tf.summary.histogram("outputs", outputs)
return outputs
def test_tb_callback(wandb_backend_spy):
np.random.seed(42)
with wandb.init(sync_tensorboard=True) as run:
model = MyModel()
model.compile("sgd", "mse")
x_train = np.random.rand(100, 28)
y_train = np.random.rand(100, 10)
tb_callback = keras.callbacks.TensorBoard(write_images=True, histogram_freq=5)
model.fit(
x_train,
y_train,
epochs=10,
callbacks=[tb_callback],
)
with wandb_backend_spy.freeze() as snapshot:
assert len(snapshot.run_ids()) == 1
summary = snapshot.summary(run_id=run.id)
history = snapshot.history(run_id=run.id)
assert (
len(list(step for step, item in history.items() if "epoch_loss" in item))
== 10
)
assert summary["global_step"] == 9
for tag in ["epoch_loss", "epoch_learning_rate"]:
assert tag in summary
for tag in ["kernel/histogram", "bias/histogram"]:
assert summary[tag]["_type"] == "histogram"
items_with_tag = list(step for step, item in history.items() if tag in item)
assert len(items_with_tag) == 2
for tag in ["kernel/image", "bias/image"]:
assert summary[tag]["_type"] == "images/separated"
items_with_tag = list(step for step, item in history.items() if tag in item)
assert len(items_with_tag) == 2
telemetry = snapshot.telemetry(run_id=run.id)
assert 35 in telemetry["3"] # tensorboard_sync
wandb.tensorboard.unpatch()
| MyModel |
python | altair-viz__altair | altair/vegalite/v6/api.py | {
"start": 17785,
"end": 22788
} | class ____(_expr_core.OperatorMixin):
_schema: t.ClassVar[_TypeMap[Literal["object"]]] = {"type": "object"}
def __init__(self, expr: IntoExpression) -> None:
self.expr = expr
def to_dict(self) -> dict[str, str]:
return {"expr": repr(self.expr)}
def _to_expr(self) -> str:
return repr(self.expr)
def _from_expr(self, expr: IntoExpression) -> SelectionExpression:
return SelectionExpression(expr=expr)
def check_fields_and_encodings(parameter: Parameter, field_name: str) -> bool:
param = parameter.param
if utils.is_undefined(param) or isinstance(param, core.VariableParameter):
return False
for prop in ["fields", "encodings"]:
try:
if field_name in getattr(param.select, prop):
return True
except (AttributeError, TypeError):
pass
return False
# -------------------------------------------------------------------------
# Tools for working with conditions
_TestPredicateType: TypeAlias = Union[
str, _expr_core.Expression, core.PredicateComposition
]
"""https://vega.github.io/vega-lite/docs/predicate.html"""
_PredicateType: TypeAlias = Union[
Parameter,
core.Expr,
"_ConditionExtra",
_TestPredicateType,
_expr_core.OperatorMixin,
]
"""Permitted types for `predicate`."""
_ComposablePredicateType: TypeAlias = Union[
_expr_core.OperatorMixin, core.PredicateComposition
]
"""Permitted types for `&` reduced predicates."""
_StatementType: TypeAlias = Union[SchemaBase, Map, str]
"""Permitted types for `if_true`/`if_false`.
In python terms:
```py
if _PredicateType:
return _StatementType
elif _PredicateType:
return _StatementType
else:
return _StatementType
```
"""
_FieldEqualType: TypeAlias = Union["IntoExpression", Parameter, SchemaBase]
"""
Permitted types for equality checks on field values.
Applies to the following context(s):
import altair as alt
alt.datum.field == ...
alt.FieldEqualPredicate(field="field", equal=...)
alt.when(field=...)
alt.when().then().when(field=...)
alt.Chart.transform_filter(field=...)
"""
def _is_test_predicate(obj: Any) -> TypeIs[_TestPredicateType]:
return isinstance(obj, (str, _expr_core.Expression, core.PredicateComposition))
def _get_predicate_expr(p: Parameter) -> Optional[_TestPredicateType]:
# https://vega.github.io/vega-lite/docs/predicate.html
return getattr(p.param, "expr", Undefined)
def _predicate_to_condition(
predicate: _PredicateType, *, empty: Optional[bool] = Undefined
) -> _Condition:
condition: _Condition
if isinstance(predicate, Parameter):
predicate_expr = _get_predicate_expr(predicate)
if predicate.param_type == "selection" or utils.is_undefined(predicate_expr):
condition = {"param": predicate.name}
if isinstance(empty, bool):
condition["empty"] = empty
elif isinstance(predicate.empty, bool):
condition["empty"] = predicate.empty
else:
condition = {"test": predicate_expr}
elif _is_test_predicate(predicate):
condition = {"test": predicate}
elif isinstance(predicate, dict):
condition = predicate
elif isinstance(predicate, _expr_core.OperatorMixin):
condition = {"test": predicate._to_expr()}
else:
msg = (
f"Expected a predicate, but got: {type(predicate).__name__!r}\n\n"
f"From `predicate={predicate!r}`."
)
raise TypeError(msg)
return condition
def _condition_to_selection(
condition: _Condition,
if_true: _StatementType,
if_false: _StatementType,
**kwargs: Any,
) -> SchemaBase | _Conditional[_Condition]:
selection: SchemaBase | _Conditional[_Condition]
if isinstance(if_true, SchemaBase):
if_true = if_true.to_dict()
elif isinstance(if_true, str):
if isinstance(if_false, str):
msg = (
"A field cannot be used for both the `if_true` and `if_false` "
"values of a condition. "
"One of them has to specify a `value` or `datum` definition."
)
raise ValueError(msg)
else:
if_true = utils.parse_shorthand(if_true)
if_true.update(kwargs)
cond_mutable: Any = dict(condition)
cond_mutable.update(if_true)
if isinstance(if_false, SchemaBase):
# For the selection, the channel definitions all allow selections
# already. So use this SchemaBase wrapper if possible.
selection = if_false.copy()
selection.condition = cond_mutable
elif isinstance(if_false, (str, dict)):
if isinstance(if_false, str):
if_false = utils.parse_shorthand(if_false)
if_false.update(kwargs)
selection = _Conditional(condition=cond_mutable, **if_false) # type: ignore[typeddict-item]
else:
raise TypeError(if_false)
return selection
| SelectionExpression |
python | wandb__wandb | wandb/sdk/launch/sweeps/scheduler.py | {
"start": 2361,
"end": 26868
} | class ____(ABC):
"""A controller/agent that populates a Launch RunQueue from a hyperparameter sweep."""
PLACEHOLDER_URI = "placeholder-uri-scheduler"
SWEEP_JOB_TYPE = "sweep-controller"
ENTRYPOINT = ["wandb", "scheduler", "WANDB_SWEEP_ID"]
def __init__(
self,
api: "Api",
*args: Optional[Any],
polling_sleep: Optional[float] = None,
sweep_id: Optional[str] = None,
entity: Optional[str] = None,
project: Optional[str] = None,
project_queue: Optional[str] = None,
num_workers: Optional[Union[int, str]] = None,
**kwargs: Optional[Any],
):
import yaml
from wandb.apis.public import Api as PublicApi
self._api = api
self._public_api = PublicApi()
self._entity = (
entity
or os.environ.get("WANDB_ENTITY")
or api.settings("entity")
or api.default_entity
)
self._project = (
project or os.environ.get("WANDB_PROJECT") or api.settings("project")
)
self._sweep_id: str = sweep_id or "empty-sweep-id"
self._state: SchedulerState = SchedulerState.PENDING
# Make sure the provided sweep_id corresponds to a valid sweep
try:
resp = self._api.sweep(
sweep_id, "{}", entity=self._entity, project=self._project
)
if resp.get("state") == SchedulerState.CANCELLED.name:
self._state = SchedulerState.CANCELLED
self._sweep_config = yaml.safe_load(resp["config"])
self._num_runs_launched: int = self._get_num_runs_launched(resp["runs"])
if self._num_runs_launched > 0:
wandb.termlog(
f"{LOG_PREFIX}Found {self._num_runs_launched} previous valid runs for sweep {self._sweep_id}"
)
except Exception as e:
raise SchedulerError(
f"{LOG_PREFIX}Exception when finding sweep ({sweep_id}) {e}"
)
# Scheduler may receive additional kwargs which will be piped into the launch command
self._kwargs: Dict[str, Any] = kwargs
# Dictionary of the runs being managed by the scheduler
self._runs: Dict[str, SweepRun] = {}
# Threading lock to ensure thread-safe access to the runs dictionary
self._threading_lock: threading.Lock = threading.Lock()
self._polling_sleep = (
polling_sleep if polling_sleep is not None else DEFAULT_POLLING_SLEEP
)
self._project_queue = project_queue
# Optionally run multiple workers in (pseudo-)parallel. Workers do not
# actually run training workloads, they simply send heartbeat messages
# (emulating a real agent) and add new runs to the launch queue. The
# launch agent is the one that actually runs the training workloads.
self._workers: Dict[int, _Worker] = {}
# Init wandb scheduler run
self._wandb_run = self._init_wandb_run()
# Grab params from scheduler wandb run config
num_workers = num_workers or self._wandb_run.config.get("scheduler", {}).get(
"num_workers"
)
self._num_workers = int(num_workers) if str(num_workers).isdigit() else 8
self._settings_config: Dict[str, Any] = self._wandb_run.config.get(
"settings", {}
)
@abstractmethod
def _get_next_sweep_run(self, worker_id: int) -> Optional[SweepRun]:
"""Called when worker available."""
@abstractmethod
def _poll(self) -> None:
"""Called every polling loop."""
@abstractmethod
def _exit(self) -> None:
pass
@abstractmethod
def _load_state(self) -> None:
pass
@abstractmethod
def _save_state(self) -> None:
pass
@property
def state(self) -> SchedulerState:
_logger.debug(f"{LOG_PREFIX}Scheduler state is {self._state.name}")
return self._state
@state.setter
def state(self, value: SchedulerState) -> None:
_logger.debug(f"{LOG_PREFIX}Scheduler was {self.state.name} is {value.name}")
self._state = value
@property
def is_alive(self) -> bool:
if self.state in [
SchedulerState.COMPLETED,
SchedulerState.FAILED,
SchedulerState.STOPPED,
SchedulerState.CANCELLED,
]:
return False
return True
@property
def at_runcap(self) -> bool:
"""False if under user-specified cap on # of runs."""
run_cap = self._sweep_config.get("run_cap")
if not run_cap:
return False
at_runcap: bool = self._num_runs_launched >= run_cap
return at_runcap
@property
def num_active_runs(self) -> int:
return len(self._runs)
@property
def busy_workers(self) -> Dict[int, _Worker]:
"""Returns dict of id:worker already assigned to a launch run.
runs should always have a worker_id, but are created before
workers are assigned to the run
"""
busy_workers = {}
for _, r in self._yield_runs():
busy_workers[r.worker_id] = self._workers[r.worker_id]
return busy_workers
@property
def available_workers(self) -> Dict[int, _Worker]:
"""Returns dict of id:worker ready to launch another run."""
if len(self._workers) == 0:
return {}
return {
_id: w for _id, w in self._workers.items() if _id not in self.busy_workers
}
def _init_wandb_run(self) -> "wandb.Run":
"""Controls resume or init logic for a scheduler wandb run."""
settings = wandb.Settings(disable_job_creation=True)
run: wandb.Run = wandb.init( # type: ignore
name=f"Scheduler.{self._sweep_id}",
resume="allow",
config=self._kwargs, # when run as a job, this sets config
settings=settings,
)
return run
def stop_sweep(self) -> None:
"""Stop the sweep."""
self._state = SchedulerState.STOPPED
def fail_sweep(self, err: Optional[str]) -> None:
"""Fail the sweep w/ optional exception."""
self._state = SchedulerState.FAILED
if err:
raise SchedulerError(err)
def start(self) -> None:
"""Start a scheduler, confirms prerequisites, begins execution loop."""
wandb.termlog(f"{LOG_PREFIX}Scheduler starting.")
if not self.is_alive:
wandb.termerror(
f"{LOG_PREFIX}Sweep already in end state ({self.state.name.lower()}). Exiting..."
)
self.exit()
return
self._state = SchedulerState.STARTING
if not self._try_load_executable():
wandb.termerror(
f"{LOG_PREFIX}No 'job' or 'image_uri' loaded from sweep config."
)
self.exit()
return
# For resuming sweeps
self._load_state()
asyncio.run(self._register_agents())
self.run()
def run(self) -> None:
"""Main run function."""
wandb.termlog(f"{LOG_PREFIX}Scheduler running")
self.state = SchedulerState.RUNNING
try:
while True:
self._update_scheduler_run_state()
if not self.is_alive:
break
wandb.termlog(f"{LOG_PREFIX}Polling for new runs to launch")
self._update_run_states()
self._poll()
if self.state == SchedulerState.FLUSH_RUNS:
if self.num_active_runs == 0:
wandb.termlog(f"{LOG_PREFIX}Done polling on runs, exiting")
break
time.sleep(self._polling_sleep)
continue
for worker_id in self.available_workers:
if self.at_runcap:
wandb.termlog(
f"{LOG_PREFIX}Sweep at run_cap ({self._num_runs_launched})"
)
self.state = SchedulerState.FLUSH_RUNS
break
try:
run: Optional[SweepRun] = self._get_next_sweep_run(worker_id)
if not run:
break
except SchedulerError as e:
raise SchedulerError(e)
except Exception as e:
wandb.termerror(
f"{LOG_PREFIX}Failed to get next sweep run: {e}"
)
self.state = SchedulerState.FAILED
break
if self._add_to_launch_queue(run):
self._num_runs_launched += 1
time.sleep(self._polling_sleep)
except KeyboardInterrupt:
wandb.termwarn(f"{LOG_PREFIX}Scheduler received KeyboardInterrupt. Exiting")
self.state = SchedulerState.STOPPED
self.exit()
return
except Exception as e:
wandb.termlog(f"{LOG_PREFIX}Scheduler failed with exception {e}")
self.state = SchedulerState.FAILED
self.exit()
raise
else:
# scheduler succeeds if at runcap
if self.state == SchedulerState.FLUSH_RUNS and self.at_runcap:
self.state = SchedulerState.COMPLETED
self.exit()
def exit(self) -> None:
self._exit()
# _save_state isn't controlled, possibly fails
try:
self._save_state()
except Exception:
wandb.termerror(
f"{LOG_PREFIX}Failed to save state: {traceback.format_exc()}"
)
status = ""
if self.state == SchedulerState.FLUSH_RUNS:
self._set_sweep_state("PAUSED")
status = "paused"
elif self.state == SchedulerState.COMPLETED:
self._set_sweep_state("FINISHED")
status = "completed"
elif self.state in [SchedulerState.CANCELLED, SchedulerState.STOPPED]:
self._set_sweep_state("CANCELED") # one L
status = "cancelled"
self._stop_runs()
else:
self.state = SchedulerState.FAILED
self._set_sweep_state("CRASHED")
status = "crashed"
self._stop_runs()
wandb.termlog(f"{LOG_PREFIX}Scheduler {status}")
self._wandb_run.finish()
def _get_num_runs_launched(self, runs: List[Dict[str, Any]]) -> int:
"""Returns the number of valid runs in the sweep."""
count = 0
for run in runs:
# if bad run, shouldn't be counted against run cap
if run.get("state", "") in ["killed", "crashed"] and not run.get(
"summaryMetrics"
):
_logger.debug(
f"excluding run: {run['name']} with state: {run['state']} from run cap \n{run}"
)
continue
count += 1
return count
def _try_load_executable(self) -> bool:
"""Check existence of valid executable for a run.
logs and returns False when job is unreachable
"""
if self._kwargs.get("job"):
try:
_job_artifact = self._public_api.job(self._kwargs["job"])
wandb.termlog(
f"{LOG_PREFIX}Successfully loaded job ({_job_artifact.name}) in scheduler"
)
except Exception:
wandb.termerror(f"{LOG_PREFIX}{traceback.format_exc()}")
return False
return True
elif self._kwargs.get("image_uri"):
# TODO(gst): check docker existence? Use registry in launch config?
return True
else:
return False
async def _register_agents(self) -> None:
tasks = []
register_agent = event_loop_thread_exec(self._api.register_agent)
for worker_id in range(self._num_workers):
_logger.debug(f"{LOG_PREFIX}Starting AgentHeartbeat worker ({worker_id})")
try:
worker = register_agent(
f"{socket.gethostname()}-{worker_id}", # host
sweep_id=self._sweep_id,
project_name=self._project,
entity=self._entity,
)
tasks.append(worker)
except Exception as e:
_logger.debug(f"failed to register agent: {e}")
self.fail_sweep(f"failed to register agent: {e}")
finished_tasks = await asyncio.gather(*tasks)
for idx, agent_config in enumerate(finished_tasks):
self._workers[idx] = _Worker(
agent_config=agent_config,
agent_id=agent_config["id"],
)
def _yield_runs(self) -> Iterator[Tuple[str, SweepRun]]:
"""Thread-safe way to iterate over the runs."""
with self._threading_lock:
yield from self._runs.items()
def _cleanup_runs(self, runs_to_remove: List[str]) -> None:
"""Helper for removing runs from memory.
Can be overloaded to prevent deletion of runs, which is useful
for debugging or when polling on completed runs.
"""
with self._threading_lock:
for run_id in runs_to_remove:
wandb.termlog(f"{LOG_PREFIX}Cleaning up finished run ({run_id})")
del self._runs[run_id]
def _stop_runs(self) -> None:
to_delete = []
for run_id, _ in self._yield_runs():
to_delete += [run_id]
for run_id in to_delete:
wandb.termlog(f"{LOG_PREFIX}Stopping run ({run_id})")
if not self._stop_run(run_id):
wandb.termwarn(f"{LOG_PREFIX}Failed to stop run ({run_id})")
def _stop_run(self, run_id: str) -> bool:
"""Stops a run and removes it from the scheduler."""
if run_id not in self._runs:
_logger.debug(f"run: {run_id} not in _runs: {self._runs}")
return False
run = self._runs[run_id]
del self._runs[run_id]
if not run.queued_run:
_logger.debug(
f"tried to _stop_run but run not queued yet (run_id:{run.id})"
)
return False
if not run.state.is_alive:
# run already dead, just delete reference
return True
# run still alive, send stop signal
encoded_run_id = base64.standard_b64encode(
f"Run:v1:{run_id}:{self._project}:{self._entity}".encode()
).decode("utf-8")
try:
success: bool = self._api.stop_run(run_id=encoded_run_id)
if success:
wandb.termlog(f"{LOG_PREFIX}Stopped run {run_id}.")
return True
except Exception as e:
_logger.debug(f"error stopping run ({run_id}): {e}")
return False
def _update_scheduler_run_state(self) -> None:
"""Update the scheduler state from state of scheduler run and sweep state."""
state: RunState = self._get_run_state(self._wandb_run.id)
# map scheduler run-state to scheduler-state
if state == RunState.KILLED:
self.state = SchedulerState.STOPPED
elif state in [RunState.FAILED, RunState.CRASHED]:
self.state = SchedulerState.FAILED
elif state == RunState.FINISHED:
self.state = SchedulerState.COMPLETED
# check sweep state for completed states, overwrite scheduler state
try:
sweep_state = self._api.get_sweep_state(
self._sweep_id, self._entity, self._project
)
except Exception as e:
_logger.debug(f"sweep state error: {e}")
return
if sweep_state == "FINISHED":
self.state = SchedulerState.COMPLETED
elif sweep_state in ["CANCELLED", "STOPPED"]:
self.state = SchedulerState.CANCELLED
elif sweep_state == "PAUSED":
self.state = SchedulerState.FLUSH_RUNS
def _update_run_states(self) -> None:
"""Iterate through runs.
Get state from backend and deletes runs if not in running state. Threadsafe.
"""
runs_to_remove: List[str] = []
for run_id, run in self._yield_runs():
run.state = self._get_run_state(run_id, run.state)
try:
rqi_state = run.queued_run.state if run.queued_run else None
except (CommError, LaunchError) as e:
_logger.debug(f"Failed to get queued_run.state: {e}")
rqi_state = None
if not run.state.is_alive or rqi_state == "failed":
_logger.debug(f"({run_id}) states: ({run.state}, {rqi_state})")
runs_to_remove.append(run_id)
self._cleanup_runs(runs_to_remove)
def _get_metrics_from_run(self, run_id: str) -> List[Any]:
"""Use the public api to get metrics from a run.
Uses the metric name found in the sweep config, any
misspellings will result in an empty list.
"""
try:
queued_run: Optional[QueuedRun] = self._runs[run_id].queued_run
if not queued_run:
return []
api_run: Run = self._public_api.run(
f"{queued_run.entity}/{queued_run.project}/{run_id}"
)
metric_name = self._sweep_config["metric"]["name"]
history = api_run.scan_history(keys=["_step", metric_name])
metrics = [x[metric_name] for x in history]
return metrics
except Exception as e:
_logger.debug(f"[_get_metrics_from_run] {e}")
return []
def _get_run_info(self, run_id: str) -> Dict[str, Any]:
"""Use the public api to get info about a run."""
try:
info: Dict[str, Any] = self._api.get_run_info(
self._entity, self._project, run_id
)
if info:
return info
except Exception as e:
_logger.debug(f"[_get_run_info] {e}")
return {}
def _get_run_state(
self, run_id: str, prev_run_state: RunState = RunState.UNKNOWN
) -> RunState:
"""Use the public api to get state of a run."""
run_state = None
try:
state = self._api.get_run_state(self._entity, self._project, run_id)
run_state = RunState(state)
except CommError as e:
_logger.debug(f"error getting state for run ({run_id}): {e}")
if prev_run_state == RunState.UNKNOWN:
# triggers when we get an unknown state for the second time
wandb.termwarn(
f"Failed to get runstate for run ({run_id}). Error: {traceback.format_exc()}"
)
run_state = RunState.FAILED
else: # first time we get unknown state
run_state = RunState.UNKNOWN
except (AttributeError, ValueError):
wandb.termwarn(
f"Bad state ({run_state}) for run ({run_id}). Error: {traceback.format_exc()}"
)
run_state = RunState.UNKNOWN
return run_state
def _create_run(self) -> Dict[str, Any]:
"""Use the public api to create a blank run."""
try:
run: List[Dict[str, Any]] = self._api.upsert_run(
project=self._project,
entity=self._entity,
sweep_name=self._sweep_id,
)
if run:
return run[0]
except Exception as e:
_logger.debug(f"[_create_run] {e}")
raise SchedulerError(
"Error creating run from scheduler, check API connection and CLI version."
)
return {}
def _set_sweep_state(self, state: str) -> None:
wandb.termlog(f"{LOG_PREFIX}Updating sweep state to: {state.lower()}")
try:
self._api.set_sweep_state(sweep=self._sweep_id, state=state)
except Exception as e:
_logger.debug(f"[set_sweep_state] {e}")
def _encode(self, _id: str) -> str:
return (
base64.b64decode(bytes(_id.encode("utf-8"))).decode("utf-8").split(":")[2]
)
def _make_entry_and_launch_config(
self, run: SweepRun
) -> Tuple[Optional[List[str]], Dict[str, Dict[str, Any]]]:
args = create_sweep_command_args({"args": run.args})
entry_point, macro_args = make_launch_sweep_entrypoint(
args, self._sweep_config.get("command")
)
# handle program macro
if entry_point and "${program}" in entry_point:
if not self._sweep_config.get("program"):
raise SchedulerError(
f"{LOG_PREFIX}Program macro in command has no corresponding 'program' in sweep config."
)
pidx = entry_point.index("${program}")
entry_point[pidx] = self._sweep_config["program"]
launch_config = copy.deepcopy(self._wandb_run.config.get("launch", {}))
if "overrides" not in launch_config:
launch_config["overrides"] = {"run_config": {}}
if "run_config" not in launch_config["overrides"]:
launch_config["overrides"]["run_config"] = {}
launch_config["overrides"]["run_config"].update(args["args_dict"])
if macro_args: # pipe in hyperparam args as params to launch
launch_config["overrides"]["args"] = macro_args
if entry_point:
unresolved = [x for x in entry_point if str(x).startswith("${")]
if unresolved:
wandb.termwarn(
f"{LOG_PREFIX}Sweep command contains unresolved macros: "
f"{unresolved}, see launch docs for supported macros."
)
return entry_point, launch_config
def _add_to_launch_queue(self, run: SweepRun) -> bool:
"""Convert a sweeprun into a launch job then push to runqueue."""
# job and image first from CLI args, then from sweep config
_job = self._kwargs.get("job") or self._sweep_config.get("job")
_sweep_config_uri = self._sweep_config.get("image_uri")
_image_uri = self._kwargs.get("image_uri") or _sweep_config_uri
if _job is None and _image_uri is None:
raise SchedulerError(f"{LOG_PREFIX}No 'job' nor 'image_uri' ({run.id})")
elif _job is not None and _image_uri is not None:
raise SchedulerError(f"{LOG_PREFIX}Sweep has both 'job' and 'image_uri'")
entry_point, launch_config = self._make_entry_and_launch_config(run)
if entry_point:
wandb.termwarn(
f"{LOG_PREFIX}Sweep command {entry_point} will override"
f" {'job' if _job else 'image_uri'} entrypoint"
)
# override resource and args of job
_job_launch_config = copy.deepcopy(self._wandb_run.config.get("launch")) or {}
# default priority is "medium"
_priority = int(launch_config.get("priority", 2)) # type: ignore
# strip resource_args and template_variables from launch_config
strip_resource_args_and_template_vars(_job_launch_config)
run_id = run.id or generate_id()
queued_run = launch_add(
run_id=run_id,
entry_point=entry_point,
config=launch_config,
docker_image=_image_uri, # TODO(gst): make agnostic (github? run uri?)
job=_job,
project=self._project,
entity=self._entity,
queue_name=self._kwargs.get("queue"),
project_queue=self._project_queue,
resource=_job_launch_config.get("resource"),
resource_args=_job_launch_config.get("resource_args"),
template_variables=_job_launch_config.get("template_variables"),
author=self._kwargs.get("author"),
sweep_id=self._sweep_id,
priority=_priority,
)
run.queued_run = queued_run
# TODO(gst): unify run and queued_run state
run.state = RunState.RUNNING # assume it will get picked up
self._runs[run_id] = run
wandb.termlog(
f"{LOG_PREFIX}Added run ({run_id}) to queue ({self._kwargs.get('queue')})"
)
return True
| Scheduler |
python | scipy__scipy | scipy/io/_harwell_boeing/tests/test_hb.py | {
"start": 1963,
"end": 2516
} | class ____:
def check_save_load(self, value):
with tempfile.NamedTemporaryFile(mode='w+t') as file:
hb_write(file, value)
file.file.seek(0)
value_loaded = hb_read(file, spmatrix=False)
assert_csc_almost_equal(value, value_loaded)
def test_simple(self):
random_arr = random_array((10, 100), density=0.1)
for format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
arr = random_arr.asformat(format, copy=False)
self.check_save_load(arr)
| TestHBReadWrite |
python | ansible__ansible | lib/ansible/module_utils/_internal/_json/_profiles/__init__.py | {
"start": 3234,
"end": 10629
} | class ____(t.Generic[_T_encoder, _T_decoder]):
serialize_map: t.ClassVar[dict[type, t.Callable]]
"""
Each concrete non-JSON type must be included in this mapping to support serialization.
Including a JSON type in the mapping allows for overriding or disabling of serialization of that type.
"""
deserialize_map: t.ClassVar[dict[str, t.Callable]]
"""A mapping of type keys to type dispatchers for deserialization."""
allowed_ansible_serializable_types: t.ClassVar[frozenset[type[AnsibleSerializable]]] = frozenset()
"""Each concrete AnsibleSerialiable derived type must be included in this set to support serialization."""
_common_discard_tags: t.ClassVar[dict[type, t.Callable]]
"""
Serialize map for tagged types to have their tags discarded.
This is generated by __init_subclass__ and should not be manually updated.
"""
_allowed_type_keys: t.ClassVar[frozenset[str]]
"""
The set of type keys allowed during deserialization.
This is generated by __init_subclass__ and should not be manually updated.
"""
_unwrapped_json_types: t.ClassVar[frozenset[type]]
"""
The set of types that do not need to be wrapped during serialization.
This is generated by __init_subclass__ and should not be manually updated.
"""
profile_name: t.ClassVar[str]
"""
The user-facing name of the profile, derived from the module name in which the profile resides.
Used to load the profile dynamically at runtime.
This is generated by __init_subclass__ and should not be manually updated.
"""
encode_strings_as_utf8: t.ClassVar[bool] = False
r"""
When enabled, JSON encoding will result in UTF8 strings being emitted.
Otherwise, non-ASCII strings will be escaped with `\uXXXX` escape sequences.`
"""
@classmethod
def pre_serialize(cls, encoder: _T_encoder, o: t.Any) -> t.Any:
return o
@classmethod
def post_deserialize(cls, decoder: _T_decoder, o: t.Any) -> t.Any:
return o
@classmethod
def cannot_serialize_error(cls, target: t.Any, /) -> t.NoReturn:
raise TypeError(f'Object of type {type(target).__name__!r} is not JSON serializable by the {cls.profile_name!r} profile.')
@classmethod
def cannot_deserialize_error(cls, target_type_name: str, /) -> t.NoReturn:
raise TypeError(f'Object of type {target_type_name!r} is not JSON deserializable by the {cls.profile_name!r} profile.')
@classmethod
def unsupported_target_type_error(cls, target_type_name: str, _value: dict) -> t.NoReturn:
cls.cannot_deserialize_error(target_type_name)
@classmethod
def discard_tags(cls, value: AnsibleTaggedObject) -> object:
return value._native_copy()
@classmethod
def deserialize_serializable(cls, value: dict[str, t.Any]) -> object:
type_key = value[AnsibleSerializable._TYPE_KEY]
if type_key not in cls._allowed_type_keys:
cls.cannot_deserialize_error(type_key)
return AnsibleSerializable._deserialize(value)
@classmethod
def serialize_as_list(cls, value: t.Iterable) -> list:
# DTFIX-FUTURE: once we have separate control/data channels for module-to-controller (and back), warn about this conversion
return AnsibleTagHelper.tag_copy(value, (item for item in value), value_type=list)
@classmethod
def serialize_as_isoformat(cls, value: datetime.date | datetime.time | datetime.datetime) -> str:
return value.isoformat()
@classmethod
def serialize_serializable_object(cls, value: AnsibleSerializable) -> t.Any:
return value._serialize()
@classmethod
def post_init(cls) -> None:
pass
@classmethod
def maybe_wrap(cls, o: t.Any) -> t.Any:
if type(o) in cls._unwrapped_json_types:
return o
return _WrappedValue(o)
@classmethod
def handle_key(cls, k: t.Any) -> t.Any:
"""Validation/conversion hook before a dict key is serialized. The default implementation only accepts str-typed keys."""
# NOTE: Since JSON requires string keys, there is no support for preserving tags on dictionary keys during serialization.
if not isinstance(k, str): # DTFIX-FUTURE: optimize this to use all known str-derived types in type map / allowed types
raise TypeError(f'Key of type {type(k).__name__!r} is not JSON serializable by the {cls.profile_name!r} profile.')
return k
@classmethod
def _handle_key_str_fallback(cls, k: t.Any) -> t.Any:
"""Legacy implementations should use this key handler for backward compatibility with stdlib JSON key conversion quirks."""
# DTFIX-FUTURE: optimized exact-type table lookup first
if isinstance(k, str):
return k
if k is None or isinstance(k, (int, float)):
return json.dumps(k)
raise TypeError(f'Key of type {type(k).__name__!r} is not JSON serializable by the {cls.profile_name!r} profile.')
@classmethod
def default(cls, o: t.Any) -> t.Any:
# Preserve the built-in JSON encoder support for subclasses of scalar types.
if isinstance(o, _json_subclassable_scalar_types):
return o
# Preserve the built-in JSON encoder support for subclasses of dict and list.
# Additionally, add universal support for mappings and sequences/sets by converting them to dict and list, respectively.
if _internal.is_intermediate_mapping(o):
return {cls.handle_key(k): cls.maybe_wrap(v) for k, v in o.items()}
if _internal.is_intermediate_iterable(o):
return [cls.maybe_wrap(v) for v in o]
return cls.last_chance(o)
@classmethod
def last_chance(cls, o: t.Any) -> t.Any:
if isinstance(o, Tripwire):
o.trip()
cls.cannot_serialize_error(o)
def __init_subclass__(cls, **kwargs) -> None:
cls.deserialize_map = {}
cls._common_discard_tags = {obj: cls.discard_tags for obj in _common_module_types if issubclass(obj, AnsibleTaggedObject)}
cls.post_init()
cls.profile_name = cls.__module__.rsplit('.', maxsplit=1)[-1].lstrip('_')
wrapper_types = set(obj for obj in cls.serialize_map.values() if isinstance(obj, type) and issubclass(obj, AnsibleSerializableWrapper))
cls.allowed_ansible_serializable_types |= wrapper_types
# no current need to preserve tags on controller-only types or custom behavior for anything in `allowed_serializable_types`
cls.serialize_map.update({obj: cls.serialize_serializable_object for obj in cls.allowed_ansible_serializable_types})
cls.serialize_map.update({obj: func for obj, func in _internal.get_controller_serialize_map().items() if obj not in cls.serialize_map})
cls.deserialize_map[AnsibleSerializable._TYPE_KEY] = cls.deserialize_serializable # always recognize tagged types
cls._allowed_type_keys = frozenset(obj._type_key for obj in cls.allowed_ansible_serializable_types)
cls._unwrapped_json_types = frozenset(
{obj for obj in cls.serialize_map if not issubclass(obj, _json_types)} # custom types that do not extend JSON-native types
| {obj for obj in _json_scalar_types if obj not in cls.serialize_map} # JSON-native scalars lacking custom handling
)
| _JSONSerializationProfile |
python | gevent__gevent | src/gevent/events.py | {
"start": 11286,
"end": 11832
} | class ____(IGeventWillPatchEvent):
"""
An event emitted *before* gevent begins patching a specific module.
Both *source* and *target* attributes are module objects.
"""
module_name = Attribute("The name of the module being patched. "
"This is the same as ``target.__name__``.")
target_item_names = Attribute("The list of item names to patch. "
"This can be modified in place with caution.")
@implementer(IGeventWillPatchModuleEvent)
| IGeventWillPatchModuleEvent |
python | PrefectHQ__prefect | tests/server/models/test_task_runs.py | {
"start": 27452,
"end": 29929
} | class ____:
@pytest.fixture
async def task_run_1(self, session, flow_run):
model = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=flow_run.id,
task_key="my-key-1",
dynamic_key="0",
tags=["red"],
state=Pending(),
),
)
await session.commit()
return model
@pytest.fixture
async def task_run_2(self, session, flow_run):
model = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=flow_run.id,
task_key="my-key-2",
dynamic_key="1",
tags=["red"],
state=Pending(),
),
)
await session.commit()
return model
async def test_force_releases_concurrency(self, session, task_run_1, task_run_2):
# first set flow runs in a running state
await models.flow_runs.set_flow_run_state(
session=session, flow_run_id=task_run_1.flow_run_id, state=Running()
)
await models.flow_runs.set_flow_run_state(
session=session, flow_run_id=task_run_2.flow_run_id, state=Running()
)
await concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="red", concurrency_limit=1
),
)
await session.commit()
# take a concurrency slot
await task_runs.set_task_run_state(
session, task_run_1.id, Running(), task_policy=CoreTaskPolicy
)
# assert it is used up
result = await task_runs.set_task_run_state(
session, task_run_2.id, Running(), task_policy=CoreTaskPolicy
)
assert result.status.value == "WAIT"
# forcibly take the task out of a running state
# the force will disregard the provided task policy
await task_runs.set_task_run_state(
session, task_run_1.id, Failed(), force=True, task_policy=CoreTaskPolicy
)
# assert the slot is available
result2 = await task_runs.set_task_run_state(
session, task_run_2.id, Running(), task_policy=CoreTaskPolicy
)
assert result2.status.value == "ACCEPT"
| TestPreventOrphanedConcurrencySlots |
python | plotly__plotly.py | plotly/graph_objs/scatterternary/legendgrouptitle/_font.py | {
"start": 233,
"end": 9962
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary.legendgrouptitle"
_path_str = "scatterternary.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-mdx-builder/tests/test_mdx_builder.py | {
"start": 214,
"end": 12306
} | class ____:
"""Test class for the MDX builder functionality."""
@pytest.fixture
def temp_dir(self):
"""Create a temporary directory for test files."""
temp_dir = tempfile.mkdtemp()
yield temp_dir
shutil.rmtree(temp_dir)
@pytest.fixture
def test_docs_dir(self):
"""Get the path to the test documentation directory."""
return Path(__file__).parent / "test_docs"
@pytest.fixture
def built_docs(self, temp_dir, test_docs_dir):
"""Build the test documentation using the MDX builder."""
srcdir = str(test_docs_dir)
outdir = os.path.join(temp_dir, "_build", "mdx")
doctreedir = os.path.join(temp_dir, "_build", "doctrees")
confdir = str(test_docs_dir)
# Add the dummy module to the Python path
import sys
dummy_module_path = str(Path(__file__).parent)
if dummy_module_path not in sys.path:
sys.path.insert(0, dummy_module_path)
with docutils_namespace():
app = Sphinx(
srcdir=srcdir,
confdir=confdir,
outdir=outdir,
doctreedir=doctreedir,
buildername="mdx",
)
app.build()
return Path(outdir)
def test_mdx_files_generated(self, built_docs):
"""Test that MDX files are generated."""
# Check that the output directory exists
assert built_docs.exists(), f"Output directory {built_docs} does not exist"
# Check that index.mdx is generated
index_file = built_docs / "index.mdx"
assert index_file.exists(), "index.mdx file was not generated"
# Check that dummy_module.mdx is generated
dummy_module_file = built_docs / "dummy_module.mdx"
assert dummy_module_file.exists(), "dummy_module.mdx file was not generated"
def test_index_mdx_content(self, built_docs):
"""Test the content of the generated index.mdx file."""
index_file = built_docs / "index.mdx"
content = index_file.read_text()
# Check for frontmatter
assert content.startswith("---"), "MDX file should start with frontmatter"
assert "title:" in content, "Frontmatter should contain title"
assert "description:" in content, "Frontmatter should contain description"
# Check for main content
assert "Test Documentation" in content, "Should contain main heading"
assert "Welcome to the test documentation" in content, "Should contain intro text"
assert "sphinx-mdx-builder" in content, "Should mention the builder"
def test_dummy_module_mdx_content(self, built_docs):
"""Test the content of the generated dummy_module.mdx file."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Check for frontmatter
assert content.startswith("---"), "MDX file should start with frontmatter"
assert "title:" in content, "Frontmatter should contain title"
# Check for module documentation
assert "Dummy Module Documentation" in content, "Should contain module heading"
assert "Vehicle" in content, "Should document Vehicle class"
assert "Car" in content, "Should document Car class"
assert "Color" in content, "Should document Color enum"
# Check for function documentation
assert "calculate_fuel_efficiency" in content, (
"Should document calculate_fuel_efficiency function"
)
assert "find_cars_by_color" in content, "Should document find_cars_by_color function"
assert "get_car_summary" in content, "Should document get_car_summary function"
# Check for constants section
assert "Constants" in content, "Should have Constants section"
assert "DEFAULT_CAR_COLOR" in content, "Should mention DEFAULT_CAR_COLOR"
assert "MAX_VEHICLE_AGE" in content, "Should mention MAX_VEHICLE_AGE"
def test_mdx_frontmatter_structure(self, built_docs):
"""Test that the MDX frontmatter has the correct structure."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
lines = content.split("\n")
# Find frontmatter boundaries
frontmatter_start = None
frontmatter_end = None
for i, line in enumerate(lines):
if line.strip() == "---":
if frontmatter_start is None:
frontmatter_start = i
else:
frontmatter_end = i
break
assert frontmatter_start is not None, "Should have frontmatter start"
assert frontmatter_end is not None, "Should have frontmatter end"
frontmatter_lines = lines[frontmatter_start + 1 : frontmatter_end]
frontmatter_content = "\n".join(frontmatter_lines)
# Check for expected frontmatter fields
assert "title:" in frontmatter_content, "Should have title field"
assert "| Test Docs" in frontmatter_content, "Should have title suffix from config"
def test_class_documentation_structure(self, built_docs):
"""Test that class documentation is properly structured."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Check for class hierarchy (Car extends Vehicle)
assert "Vehicle" in content, "Should document base Vehicle class"
assert "Car" in content, "Should document Car class"
# Check for method documentation
assert "start_engine" in content, "Should document Vehicle.start_engine method"
assert "honk_horn" in content, "Should document Car.honk_horn method"
# Check for parameter documentation (MDX builder uses "Parameters:" instead of "Args:")
assert "Parameters:" in content, "Should have Parameters sections"
assert "Returns:" in content, "Should have Returns sections"
def test_function_documentation_structure(self, built_docs):
"""Test that function documentation is properly structured."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Check for function signatures and documentation
assert "calculate_fuel_efficiency" in content, "Should document function"
assert "distance" in content, "Should show parameter names"
assert "fuel_used" in content, "Should show parameter names"
assert "ValueError" in content, "Should document exceptions"
def test_enum_documentation(self, built_docs):
"""Test that enum documentation is included."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Check for enum documentation
assert "Color" in content, "Should document Color enum"
assert "RED" in content or "red" in content, "Should document enum values"
assert "GREEN" in content or "green" in content, "Should document enum values"
assert "BLUE" in content or "blue" in content, "Should document enum values"
def test_configuration_options_applied(self, built_docs):
"""Test that configuration options are properly applied."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Check that title suffix is applied (from conf.py: mdx_title_suffix = ' | Test Docs')
assert "| Test Docs" in content, "Should apply title suffix from configuration"
# Check file extension is .mdx
assert dummy_module_file.suffix == ".mdx", "Should generate .mdx files"
# Check that GitHub URL configuration is applied (if source links are present)
if "github.com" in content:
assert "test-repo" in content, "Should use configured GitHub URL"
def test_source_links_generation(self, built_docs):
"""Test that [source] links are properly generated."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Check for source links presence
assert "[source]" in content, "Should contain [source] links"
assert "className='source-link'" in content, "Should have proper source link styling"
# Check for GitHub URLs in source links
assert "https://github.com/test/test-repo/blob/main" in content, (
"Should use configured GitHub base URL"
)
# Check for specific source file references
assert "tests/dummy_module.py" in content, "Should reference the dummy module source file"
# Check for line number references (source links should include line numbers)
import re
line_number_pattern = r"tests/dummy_module\.py#L\d+"
assert re.search(line_number_pattern, content), "Source links should include line numbers"
# Check that source links are properly formatted as JSX components
assert "target='_blank'" in content, "Source links should open in new tab"
assert "rel='noopener noreferrer'" in content, (
"Source links should have proper security attributes"
)
def test_decorated_function_source_links(self, built_docs):
"""Test that decorated functions have source links generated properly."""
dummy_module_file = built_docs / "dummy_module.mdx"
content = dummy_module_file.read_text()
# Test functions with different wrapper patterns
wrapper_functions = [
("test_dagster_style_logger", 202, 208), # Custom wrapper with logger_fn
("test_func_wrapper", 224, 230), # GenericWrapper with func
("test_function_wrapper", 234, 240), # GenericWrapper with function
("test_callback_wrapper", 243, 249), # GenericWrapper with callback
]
# Check that source links are present
assert "[source]" in content, "Should have source links for wrapped functions"
import re
# Find all source links in the content
source_link_pattern = (
r"<a[^>]*href='([^']*tests/dummy_module\.py#L\d+)'[^>]*>\[source\]</a>"
)
source_links = re.findall(source_link_pattern, content)
# Check that we have source links
assert len(source_links) > 0, "Should have at least one source link for functions"
# Check each wrapper function has its source link
found_functions = []
for func_name, start_line, end_line in wrapper_functions:
# Check that the function is documented
if func_name in content:
found_functions.append(func_name)
# Look for source link in the expected line range
function_source_found = False
for link in source_links:
if "dummy_module.py#L" in link:
line_match = re.search(r"#L(\d+)", link)
if line_match:
line_num = int(line_match.group(1))
if start_line <= line_num <= end_line:
function_source_found = True
break
assert function_source_found, (
f"Should have source link for wrapped function {func_name} "
f"in line range {start_line}-{end_line}. "
f"Found source links: {source_links}"
)
# Ensure we found at least some of the wrapper functions
assert len(found_functions) >= 2, (
f"Should document multiple wrapper functions. Found: {found_functions}"
)
def test_direct_builder_import():
"""Test that the builder can be imported directly."""
from sphinxcontrib.mdxbuilder.builders.mdx import MdxBuilder
assert MdxBuilder.name == "mdx", "Builder should have correct name"
assert MdxBuilder.format == "mdx", "Builder should have correct format"
| TestMdxBuilder |
python | getsentry__sentry | src/sentry/api/serializers/models/event.py | {
"start": 23552,
"end": 25241
} | class ____(EventSerializer):
"""
Event serializer for the minimum event data needed to send to an external service. This
should be used for Integrations that need to include event data.
"""
def serialize(self, obj, attrs, user, **kwargs):
from sentry.notifications.utils import get_notification_group_title
tags = [{"key": key.split("sentry:", 1)[-1], "value": value} for key, value in obj.tags]
for tag in tags:
query = convert_user_tag_to_query(tag["key"], tag["value"])
if query:
tag["query"] = query
map_device_class_tags(tags)
user = obj.get_minimal_user()
return {
"groupID": str(obj.group_id) if obj.group_id else None,
"eventID": str(obj.event_id),
"project": str(obj.project_id),
# XXX for 'message' this doesn't do the proper resolution of logentry
# etc. that _get_legacy_message_with_meta does.
"message": obj.message,
"title": get_notification_group_title(obj.group, obj, 1024),
"location": obj.location,
"culprit": obj.culprit,
"user": user and user.get_api_context(),
"tags": tags,
"platform": obj.platform,
"datetime": obj.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
}
def map_device_class_tags(tags):
"""
If device.class tag exists, set the value to high, medium, low
"""
for tag in tags:
if tag["key"] == "device.class":
if device_class := map_device_class_level(tag["value"]):
tag["value"] = device_class
return tags
| ExternalEventSerializer |
python | Textualize__textual | src/textual/demo/widgets.py | {
"start": 2884,
"end": 4076
} | class ____(containers.VerticalGroup):
"""Demonstrates Checkboxes."""
DEFAULT_CLASSES = "column"
DEFAULT_CSS = """
Checkboxes {
height: auto;
Checkbox, RadioButton { width: 1fr; }
&>HorizontalGroup > * { width: 1fr; }
}
"""
CHECKBOXES_MD = """\
## Checkboxes, Radio buttons, and Radio sets
Checkboxes to toggle booleans.
Radio buttons for exclusive booleans.
Hit `return` to toggle an checkbox / radio button, when focused.
"""
RADIOSET_MD = """\
### Radio Sets
A *radio set* is a list of mutually exclusive options.
Use the `up` and `down` keys to navigate the list.
Press `return` to toggle a radio button.
"""
def compose(self) -> ComposeResult:
yield Markdown(self.CHECKBOXES_MD)
yield Checkbox("A Checkbox")
yield RadioButton("A Radio Button")
yield Markdown(self.RADIOSET_MD)
yield RadioSet(
"Amanda",
"Connor MacLeod",
"Duncan MacLeod",
"Heather MacLeod",
"Joe Dawson",
"Kurgan, [bold italic red]The[/]",
"Methos",
"Rachel Ellenstein",
"Ramírez",
)
| Checkboxes |
python | kamyu104__LeetCode-Solutions | Python/maximum-fruits-harvested-after-at-most-k-steps.py | {
"start": 29,
"end": 989
} | class ____(object):
def maxTotalFruits(self, fruits, startPos, k):
"""
:type fruits: List[List[int]]
:type startPos: int
:type k: int
:rtype: int
"""
max_pos = max(startPos, fruits[-1][0])
cnt = [0]*(1+max_pos)
for p, a in fruits:
cnt[p] = a
prefix = [0]
for x in cnt:
prefix.append(prefix[-1]+x)
result = 0
for left_dist in xrange(min(startPos, k)+1):
right_dist = max(k-2*left_dist, 0)
left, right = startPos-left_dist, min(startPos+right_dist, max_pos)
result = max(result, prefix[right+1]-prefix[left])
for right_dist in xrange(min(max_pos-startPos, k)+1):
left_dist = max(k-2*right_dist, 0)
left, right = max(startPos-left_dist, 0), startPos+right_dist
result = max(result, prefix[right+1]-prefix[left])
return result
| Solution |
python | ZoranPandovski__al-go-rithms | data_structures/Tree/Binary-tree/left-view.py | {
"start": 1000,
"end": 1128
} | class ____:
def __init__(self,val):
self.data = val
self.left = None
self.right = None
# Tree Class
| Node |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/types.py | {
"start": 210,
"end": 465
} | class ____(BaseModel):
id: str
"""The ID of the tool call."""
function: Function
"""The function that the model called."""
type: Literal["function"]
"""The type of the tool. Currently, only `function` is supported."""
| ToolCallMessage |
python | realpython__materials | inheritance-and-composition/choosing/productivity.py | {
"start": 625,
"end": 738
} | class ____:
def perform_duties(self, hours):
return f"screams and yells for {hours} hours."
| ManagerRole |
python | Pylons__pyramid | tests/test_security.py | {
"start": 12705,
"end": 13886
} | class ____(unittest.TestCase):
def setUp(self):
testing.setUp()
def tearDown(self):
testing.tearDown()
def test_no_authentication_policy(self):
from pyramid.security import Everyone
request = _makeRequest()
self.assertEqual(request.effective_principals, [Everyone])
def test_with_security_policy(self):
from pyramid.security import Everyone
request = _makeRequest()
_registerSecurityPolicy(request.registry, 'yo')
self.assertEqual(request.effective_principals, [Everyone])
def test_legacy_authentication_policy(self):
request = _makeRequest()
_registerAuthenticationPolicy(request.registry, 'yo')
_registerLegacySecurityPolicy(request.registry)
self.assertEqual(request.effective_principals, 'yo')
def test_security_policy_trumps_authentication_policy(self):
from pyramid.security import Everyone
request = _makeRequest()
_registerAuthenticationPolicy(request.registry, 'wat')
_registerSecurityPolicy(request.registry, 'yo')
self.assertEqual(request.effective_principals, [Everyone])
| TestEffectivePrincipals |
python | huggingface__transformers | tests/models/idefics/test_processing_idefics.py | {
"start": 1017,
"end": 7252
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = IdeficsProcessor
input_keys = ["pixel_values", "input_ids", "attention_mask", "image_attention_mask"]
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
return image_processor_class(return_tensors="pt")
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
return tokenizer_class.from_pretrained("HuggingFaceM4/tiny-random-idefics")
def prepare_prompts(self):
"""This function prepares a list of PIL images"""
num_images = 2
images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for x in range(num_images)]
images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in images]
# print([type(x) for x in images])
# die
prompts = [
# text and 1 image
[
"User:",
images[0],
"Describe this image.\nAssistant:",
],
# text and images
[
"User:",
images[0],
"Describe this image.\nAssistant: An image of two dogs.\n",
"User:",
images[1],
"Describe this image.\nAssistant:",
],
# only text
[
"User:",
"Describe this image.\nAssistant: An image of two kittens.\n",
"User:",
"Describe this image.\nAssistant:",
],
# only images
[
images[0],
images[1],
],
]
return prompts
def test_save_load_pretrained_additional_features(self):
tokenizer_add_kwargs = self.get_component("tokenizer", bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_component("image_processor", do_normalize=False, padding_value=1.0)
processor = IdeficsProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, self._get_component_class_from_processor("tokenizer"))
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, self._get_component_class_from_processor("image_processor"))
def test_tokenizer_padding(self):
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", padding_side="right")
processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor, return_tensors="pt")
predicted_tokens = [
"<s>Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk>",
"<s>Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk>",
]
predicted_attention_masks = [
([1] * 10) + ([0] * 9),
([1] * 10) + ([0] * 10),
]
prompts = [[prompt] for prompt in self.prepare_prompts()[2]]
max_length = processor(text=prompts, padding="max_length", truncation=True, max_length=20, return_tensors="pt")
longest = processor(text=prompts, padding="longest", truncation=True, max_length=30, return_tensors="pt")
decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1])
decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1])
self.assertEqual(decoded_max_length, predicted_tokens[1])
self.assertEqual(decoded_longest, predicted_tokens[0])
self.assertListEqual(max_length["attention_mask"][-1].tolist(), predicted_attention_masks[1])
self.assertListEqual(longest["attention_mask"][-1].tolist(), predicted_attention_masks[0])
def test_tokenizer_left_padding(self):
"""Identical to test_tokenizer_padding, but with padding_side not explicitly set."""
processor = self.get_processor()
predicted_tokens = [
"<unk><unk><unk><unk><unk><unk><unk><unk><unk><s>Describe this image.\nAssistant:",
"<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk><s>Describe this image.\nAssistant:",
]
predicted_attention_masks = [
([0] * 9) + ([1] * 10),
([0] * 10) + ([1] * 10),
]
prompts = [[prompt] for prompt in self.prepare_prompts()[2]]
max_length = processor(text=prompts, padding="max_length", truncation=True, max_length=20)
longest = processor(text=prompts, padding="longest", truncation=True, max_length=30)
decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1])
decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1])
self.assertEqual(decoded_max_length, predicted_tokens[1])
self.assertEqual(decoded_longest, predicted_tokens[0])
self.assertListEqual(max_length["attention_mask"][-1].tolist(), predicted_attention_masks[1])
self.assertListEqual(longest["attention_mask"][-1].tolist(), predicted_attention_masks[0])
def test_tokenizer_defaults(self):
# Override to account for the processor prefixing the BOS token to prompts.
components = {attribute: self.get_component(attribute) for attribute in self.processor_class.get_attributes()}
processor = self.processor_class(**components)
tokenizer = components["tokenizer"]
input_str = ["lower newer"]
encoded_processor = processor(text=input_str, padding=False, return_tensors="pt")
encoded_tok = tokenizer(
[f"{tokenizer.bos_token}{input_str[0]}"], padding=False, add_special_tokens=False, return_tensors="pt"
)
for key in encoded_tok:
if key in encoded_processor:
self.assertListEqual(encoded_tok[key].tolist(), encoded_processor[key].tolist())
| IdeficsProcessorTest |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 10743,
"end": 11436
} | class ____(Structure):
_fields_ = (("_version", p_uint32),)
@property
def major(self):
return self._version >> 16 & 0xFFFF
@major.setter
def major(self, v):
self._version = (self._version & 0xFFFF) | (v << 16)
@property
def minor(self):
return self._version >> 8 & 0xFF
@minor.setter
def minor(self, v):
self._version = (self._version & 0xFFFF00FF) | (v << 8)
@property
def rev(self):
return self._version & 0xFF
@rev.setter
def rev(self, v):
return (self._version & 0xFFFFFF00) | v
def __str__(self):
return "%s.%s.%s" % (self.major, self.minor, self.rev)
| mach_version_helper |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/vector.py | {
"start": 2072,
"end": 2504
} | class ____(Enum):
"""Enum representing the vector type,
See :ref:`oracle_vector_datatype` for background.
.. versionadded:: 2.0.43
"""
SPARSE = "SPARSE"
"""
A Sparse vector is a vector which has zero value for
most of its dimensions.
"""
DENSE = "DENSE"
"""
A Dense vector is a vector where most, if not all, elements
hold meaningful values.
"""
@dataclass
| VectorStorageType |
python | econchick__interrogate | src/interrogate/coverage.py | {
"start": 2218,
"end": 2938
} | class ____(BaseInterrogateResult):
"""Coverage results for all files.
:attr int ret_code: return code of program (``0`` for success, ``1``
for fail).
:attr list(InterrogateFileResult) file_results: list of file
results associated with this program run.
"""
ret_code: int = attr.ib(init=False, default=0, repr=False)
file_results: list[InterrogateFileResult] = attr.ib(
init=False, default=None, repr=False
)
def combine(self) -> None:
"""Tally results from each file."""
for result in self.file_results:
self.covered += result.covered
self.missing += result.missing
self.total += result.total
| InterrogateResults |
python | encode__django-rest-framework | tests/test_one_to_one_with_inheritance.py | {
"start": 772,
"end": 1255
} | class ____(TestCase):
def test_multitable_inherited_model_fields_as_expected(self):
"""
Assert that the parent pointer field is not included in the fields
serialized fields
"""
child = ChildModel(name1='parent name', name2='child name')
serializer = DerivedModelSerializer(child)
self.assertEqual(set(serializer.data),
{'name1', 'name2', 'id', 'childassociatedmodel'})
| InheritedModelSerializationTests |
python | plotly__plotly.py | plotly/graph_objs/scattersmith/marker/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8574
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattersmith.marker.colorbar"
_path_str = "scattersmith.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattersmith.m
arker.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattersmith.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | jmcnamara__XlsxWriter | xlsxwriter/test/drawing/test_write_a_graphic_frame_locks.py | {
"start": 297,
"end": 844
} | class ____(unittest.TestCase):
"""
Test the Drawing _write_a_graphic_frame_locks() method.
"""
def setUp(self):
self.fh = StringIO()
self.drawing = Drawing()
self.drawing._set_filehandle(self.fh)
def test_write_a_graphic_frame_locks(self):
"""Test the _write_a_graphic_frame_locks() method"""
self.drawing._write_a_graphic_frame_locks()
exp = """<a:graphicFrameLocks noGrp="1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteAgraphicFrameLocks |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/saved_model/serialized_attributes.py | {
"start": 12862,
"end": 13231
} | class ____(SerializedAttributes.with_attributes(
'RNNAttributes',
checkpointable_objects=['states'],
copy_from=[LayerAttributes])):
"""RNN checkpointable objects + functions that are saved to the SavedModel.
List of all attributes:
All attributes from LayerAttributes (including CommonEndpoints)
states: List of state variables
"""
| RNNAttributes |
python | oauthlib__oauthlib | tests/oauth1/rfc5849/endpoints/test_base.py | {
"start": 10504,
"end": 14008
} | class ____(RequestValidator):
clients = ['foo']
nonces = [('foo', 'once', '1234567891', 'fez')]
owners = {'foo': ['abcdefghijklmnopqrstuvxyz', 'fez']}
assigned_realms = {('foo', 'abcdefghijklmnopqrstuvxyz'): 'photos'}
verifiers = {('foo', 'fez'): 'shibboleth'}
@property
def client_key_length(self):
return 1, 30
@property
def request_token_length(self):
return 1, 30
@property
def access_token_length(self):
return 1, 30
@property
def nonce_length(self):
return 2, 30
@property
def verifier_length(self):
return 2, 30
@property
def realms(self):
return ['photos']
@property
def timestamp_lifetime(self):
# Disabled check to allow hardcoded verification signatures
return 1000000000
@property
def dummy_client(self):
return 'dummy'
@property
def dummy_request_token(self):
return 'dumbo'
@property
def dummy_access_token(self):
return 'dumbo'
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
resource_owner_key = request_token if request_token else access_token
return (client_key, nonce, timestamp, resource_owner_key) not in self.nonces
def validate_client_key(self, client_key):
return client_key in self.clients
def validate_access_token(self, client_key, access_token, request):
return (self.owners.get(client_key) and
access_token in self.owners.get(client_key))
def validate_request_token(self, client_key, request_token, request):
return (self.owners.get(client_key) and
request_token in self.owners.get(client_key))
def validate_requested_realm(self, client_key, realm, request):
return True
def validate_realm(self, client_key, access_token, request, uri=None,
required_realm=None):
return (client_key, access_token) in self.assigned_realms
def validate_verifier(self, client_key, request_token, verifier,
request):
return ((client_key, request_token) in self.verifiers and
safe_string_equals(verifier, self.verifiers.get(
(client_key, request_token))))
def validate_redirect_uri(self, client_key, redirect_uri, request):
return redirect_uri.startswith('http://client.example.com/')
def get_client_secret(self, client_key, request):
return 'super secret'
def get_access_token_secret(self, client_key, access_token, request):
return 'even more secret'
def get_request_token_secret(self, client_key, request_token, request):
return 'even more secret'
def get_rsa_key(self, client_key, request):
return ("-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNA"
"DCBiQKBgQDVLQCATX8iK+aZuGVdkGb6uiar\nLi/jqFwL1dYj0JLIsdQc"
"KaMWtPC06K0+vI+RRZcjKc6sNB9/7kJcKN9Ekc9BUxyT\n/D09Cz47cmC"
"YsUoiW7G8NSqbE4wPiVpGkJRzFAxaCWwOSSQ+lpC9vwxnvVQfOoZ1\nnp"
"mWbCdA0iTxsMahwQIDAQAB\n-----END PUBLIC KEY-----")
| ClientValidator |
python | jschneier__django-storages | storages/backends/dropbox.py | {
"start": 941,
"end": 2098
} | class ____(File):
def __init__(self, name, storage):
self.name = name
self._storage = storage
self._file = None
def _get_file(self):
if self._file is None:
self._file = SpooledTemporaryFile()
# As dropbox==9.3.0, the client returns a tuple
# (dropbox.files.FileMetadata, requests.models.Response)
file_metadata, response = self._storage.client.files_download(self.name)
if response.status_code == 200:
with BytesIO(response.content) as file_content:
copyfileobj(file_content, self._file)
else:
# JIC the exception isn't caught by the dropbox client
raise DropboxStorageException(
"Dropbox server returned a {} response when accessing {}".format(
response.status_code, self.name
)
)
self._file.seek(0)
return self._file
def _set_file(self, value):
self._file = value
file = property(_get_file, _set_file)
DropBoxFile = DropboxFile
@deconstructible
| DropboxFile |
python | spack__spack | lib/spack/spack/llnl/util/lock.py | {
"start": 28626,
"end": 28873
} | class ____(LockError):
"""Raised when unable to downgrade from a write to a read lock."""
def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path
super().__init__(msg)
| LockDowngradeError |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 22372,
"end": 23215
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_list_stored_info_types(self, mock_hook):
mock_hook.return_value.list_stored_info_types.return_value = mock.MagicMock()
operator = CloudDLPListStoredInfoTypesOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.list_stored_info_types.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
page_size=None,
order_by=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPListStoredInfoTypesOperator |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 75862,
"end": 79958
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Gemma3nTextConfig, device=None, layer_type=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.layer_types = list(set(config.layer_types))
self.rope_type = {}
for layer_type in self.layer_types:
rope_params = self.config.rope_parameters[layer_type]
if rope_params is None:
continue
self.rope_type[layer_type] = rope_params["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type[layer_type] != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type[layer_type]]
curr_inv_freq, curr_attention_scaling = rope_init_fn(self.config, device, layer_type=layer_type)
self.register_buffer(f"{layer_type}_inv_freq", curr_inv_freq, persistent=False)
setattr(self, f"{layer_type}_original_inv_freq", curr_inv_freq)
setattr(self, f"{layer_type}_attention_scaling", curr_attention_scaling)
@staticmethod
def compute_default_rope_parameters(
config: Optional[Gemma3nTextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
layer_type: Optional[str] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
layer_type (`str`, *optional*):
The current layer type if the model has different RoPE parameters per type.
Should not be used unless `config.layer_types is not None`
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
# For backward compatibility standardize the `rope_parameters_dict` if it uses old format
base = config.rope_parameters[layer_type]["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids, layer_type=None):
inv_freq = getattr(self, f"{layer_type}_inv_freq")
attention_scaling = getattr(self, f"{layer_type}_attention_scaling")
inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * attention_scaling
sin = emb.sin() * attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring(custom_intro="The base Gemma 3n language model without a language modeling head.")
| Gemma3nRotaryEmbedding |
python | django__django | tests/template_tests/syntax_tests/test_with.py | {
"start": 2465,
"end": 2641
} | class ____(SimpleTestCase):
def test_repr(self):
node = WithNode(nodelist=[], name="a", var="dict.key")
self.assertEqual(repr(node), "<WithNode>")
| WithNodeTests |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 99737,
"end": 102896
} | class ____(PreTrainedModel):
config: Mask2FormerConfig
base_model_prefix = "model"
main_input_name = "pixel_values"
input_modalities = ("image",)
@torch.no_grad()
def _init_weights(self, module: nn.Module):
xavier_std = self.config.init_xavier_std
std = self.config.init_std
if isinstance(module, Mask2FormerTransformerModule):
if module.input_projections is not None:
for input_projection in module.input_projections:
if not isinstance(input_projection, nn.Sequential):
init.xavier_uniform_(input_projection.weight, gain=xavier_std)
init.constant_(input_projection.bias, 0)
elif isinstance(module, Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention):
init.constant_(module.sampling_offsets.weight, 0.0)
thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(module.n_heads, 1, 1, 2)
.repeat(1, module.n_levels, module.n_points, 1)
)
for i in range(module.n_points):
grid_init[:, :, i, :] *= i + 1
init.copy_(module.sampling_offsets.bias, grid_init.view(-1))
init.constant_(module.attention_weights.weight, 0.0)
init.constant_(module.attention_weights.bias, 0.0)
init.xavier_uniform_(module.value_proj.weight)
init.constant_(module.value_proj.bias, 0.0)
init.xavier_uniform_(module.output_proj.weight)
init.constant_(module.output_proj.bias, 0.0)
elif isinstance(module, Mask2FormerMaskedAttentionDecoderLayer):
for p in module.parameters():
if p.dim() > 1:
init.xavier_uniform_(p, gain=xavier_std)
init.zeros_(module.cross_attn.in_proj_bias)
elif isinstance(module, Mask2FormerPixelDecoder):
init.normal_(module.level_embed, std=0)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
init.ones_(module.weight)
init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
init.normal_(module.weight, mean=0.0, std=std)
# Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
init.zeros_(module.weight[module.padding_idx])
if hasattr(module, "reference_points"):
init.xavier_uniform_(module.reference_points.weight, gain=1.0)
init.constant_(module.reference_points.bias, 0.0)
@auto_docstring
| Mask2FormerPreTrainedModel |
python | PrefectHQ__prefect | src/prefect/server/schemas/responses.py | {
"start": 19422,
"end": 19645
} | class ____(WorkQueueResponse, WorkQueueStatusDetail):
"""Combines a work queue and its status details into a single object"""
DEFAULT_HEARTBEAT_INTERVAL_SECONDS = 30
INACTIVITY_HEARTBEAT_MULTIPLE = 3
| WorkQueueWithStatus |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 7313,
"end": 7494
} | class ____(nn.Module):
base_model_prefix = "model"
def __init__(self):
super().__init__()
self.model = DummyTopModel()
self.mlp = DummyMLP()
| DummyRoot |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 40621,
"end": 40749
} | class ____(HelloWorldOperator):
def execute(self, context):
return super().execute(context)
| ExtendedHelloWorldOperator |
python | huggingface__transformers | examples/pytorch/old_test_xla_examples.py | {
"start": 1212,
"end": 2796
} | class ____(TestCasePlus):
def test_run_glue(self):
import xla_spawn
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert/distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(sys, "argv", testargs):
start = time()
xla_spawn.main()
end = time()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start, 500)
def test_trainer_tpu(self):
import xla_spawn
testargs = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(sys, "argv", testargs):
xla_spawn.main()
| TorchXLAExamplesTests |
python | ray-project__ray | python/ray/data/tests/test_download_expression.py | {
"start": 163,
"end": 1296
} | class ____:
"""Test DownloadExpr structural equality and basic properties."""
def test_download_expression_creation(self):
"""Test that download() creates a DownloadExpr with correct properties."""
expr = download("uri_column")
assert isinstance(expr, DownloadExpr)
assert expr.uri_column_name == "uri_column"
def test_download_expression_structural_equality(self):
"""Test structural equality comparison for download expressions."""
# Same expressions should be equal
expr1 = download("uri")
expr2 = download("uri")
assert expr1.structurally_equals(expr2)
assert expr2.structurally_equals(expr1)
# Different URI column names should not be equal
expr3 = download("different_uri")
assert not expr1.structurally_equals(expr3)
assert not expr3.structurally_equals(expr1)
# Compare with non-DownloadExpr
non_download_expr = col("uri")
assert not expr1.structurally_equals(non_download_expr)
assert not non_download_expr.structurally_equals(expr1)
| TestDownloadExpressionStructure |
python | plotly__plotly.py | plotly/graph_objs/pie/marker/_line.py | {
"start": 233,
"end": 4566
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "pie.marker"
_path_str = "pie.marker.line"
_valid_props = {"color", "colorsrc", "width", "widthsrc"}
@property
def color(self):
"""
Sets the color of the line enclosing each sector.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def width(self):
"""
Sets the width (in px) of the line enclosing each sector.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the line enclosing each sector.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.pie.marker.Line`
color
Sets the color of the line enclosing each sector.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.pie.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("width", arg, width)
self._set_property("widthsrc", arg, widthsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_ecs.py | {
"start": 1024,
"end": 6332
} | class ____:
@pytest.fixture(autouse=True)
def _setup_test_cases(self, monkeypatch):
self.client = boto3.client("ecs", region_name="eu-west-3")
monkeypatch.setattr(EcsHook, "conn", self.client)
@pytest.fixture
def mock_describe_clusters(self):
"""Mock ``ECS.Client.describe_clusters`` method."""
with mock.patch.object(self.client, "describe_clusters") as m:
yield m
@pytest.fixture
def mock_describe_task_definition(self):
"""Mock ``ECS.Client.describe_task_definition`` method."""
with mock.patch.object(self.client, "describe_task_definition") as m:
yield m
def test_service_waiters(self):
hook_waiters = EcsHook(aws_conn_id=None).list_waiters()
assert "cluster_active" in hook_waiters
assert "cluster_inactive" in hook_waiters
@staticmethod
def describe_clusters(
status: str | EcsClusterStates, cluster_name: str = "spam-egg", failures: dict | list | None = None
):
"""
Helper function for generate minimal DescribeClusters response for single job.
https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeClusters.html
"""
if isinstance(status, EcsClusterStates):
status = status.value
else:
assert status in EcsClusterStates.__members__.values()
failures = failures or []
if isinstance(failures, dict):
failures = [failures]
return {"clusters": [{"clusterName": cluster_name, "status": status}], "failures": failures}
def test_cluster_active(self, mock_describe_clusters):
"""Test cluster reach Active state during creation."""
mock_describe_clusters.side_effect = [
self.describe_clusters(EcsClusterStates.DEPROVISIONING),
self.describe_clusters(EcsClusterStates.PROVISIONING),
self.describe_clusters(EcsClusterStates.ACTIVE),
]
waiter = EcsHook(aws_conn_id=None).get_waiter("cluster_active")
waiter.wait(clusters=["spam-egg"], WaiterConfig={"Delay": 0.01, "MaxAttempts": 3})
@pytest.mark.parametrize("state", ["FAILED", "INACTIVE"])
def test_cluster_active_failure_states(self, mock_describe_clusters, state):
"""Test cluster reach inactive state during creation."""
mock_describe_clusters.side_effect = [
self.describe_clusters(EcsClusterStates.PROVISIONING),
self.describe_clusters(state),
]
waiter = EcsHook(aws_conn_id=None).get_waiter("cluster_active")
with pytest.raises(WaiterError, match=f'matched expected path: "{state}"'):
waiter.wait(clusters=["spam-egg"], WaiterConfig={"Delay": 0.01, "MaxAttempts": 3})
def test_cluster_active_failure_reasons(self, mock_describe_clusters):
"""Test cluster reach failure state during creation."""
mock_describe_clusters.side_effect = [
self.describe_clusters(EcsClusterStates.PROVISIONING),
self.describe_clusters(EcsClusterStates.PROVISIONING, failures={"reason": "MISSING"}),
]
waiter = EcsHook(aws_conn_id=None).get_waiter("cluster_active")
with pytest.raises(WaiterError, match='matched expected path: "MISSING"'):
waiter.wait(clusters=["spam-egg"], WaiterConfig={"Delay": 0.01, "MaxAttempts": 3})
def test_cluster_inactive(self, mock_describe_clusters):
"""Test cluster reach Inactive state during deletion."""
mock_describe_clusters.side_effect = [
self.describe_clusters(EcsClusterStates.ACTIVE),
self.describe_clusters(EcsClusterStates.ACTIVE),
self.describe_clusters(EcsClusterStates.INACTIVE),
]
waiter = EcsHook(aws_conn_id=None).get_waiter("cluster_inactive")
waiter.wait(clusters=["spam-egg"], WaiterConfig={"Delay": 0.01, "MaxAttempts": 3})
def test_cluster_inactive_failure_reasons(self, mock_describe_clusters):
"""Test cluster reach failure state during deletion."""
mock_describe_clusters.side_effect = [
self.describe_clusters(EcsClusterStates.ACTIVE),
self.describe_clusters(EcsClusterStates.DEPROVISIONING),
self.describe_clusters(EcsClusterStates.DEPROVISIONING, failures={"reason": "MISSING"}),
]
waiter = EcsHook(aws_conn_id=None).get_waiter("cluster_inactive")
waiter.wait(clusters=["spam-egg"], WaiterConfig={"Delay": 0.01, "MaxAttempts": 3})
@staticmethod
def describe_task_definition(status: str | EcsTaskDefinitionStates, task_definition: str = "spam-egg"):
"""
Helper function for generate minimal DescribeTaskDefinition response for single job.
https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTaskDefinition.html
"""
if isinstance(status, EcsTaskDefinitionStates):
status = status.value
else:
assert status in EcsTaskDefinitionStates.__members__.values()
return {
"taskDefinition": {
"taskDefinitionArn": (
f"arn:aws:ecs:eu-west-3:123456789012:task-definition/{task_definition}:42"
),
"status": status,
}
}
| TestCustomECSServiceWaiters |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 35530,
"end": 36327
} | class ____:
"""
Definition of a single field within a `Struct` DataType.
Parameters
----------
name
The name of the field within its parent `Struct`.
dtype
The `DataType` of the field's values.
"""
name: str
dtype: PolarsDataType
def __init__(self, name: str, dtype: PolarsDataType) -> None:
self.name = name
self.dtype = polars.datatypes.parse_into_dtype(dtype)
def __eq__(self, other: Field) -> bool: # type: ignore[override]
return (self.name == other.name) & (self.dtype == other.dtype)
def __hash__(self) -> int:
return hash((self.name, self.dtype))
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}({self.name!r}, {self.dtype})"
| Field |
python | boto__boto3 | boto3/resources/collection.py | {
"start": 12266,
"end": 19113
} | class ____:
"""
A factory to create new
:py:class:`CollectionManager` and :py:class:`ResourceCollection`
subclasses from a :py:class:`~boto3.resources.model.Collection`
model. These subclasses include methods to perform batch operations.
"""
def load_from_definition(
self, resource_name, collection_model, service_context, event_emitter
):
"""
Loads a collection from a model, creating a new
:py:class:`CollectionManager` subclass
with the correct properties and methods, named based on the service
and resource name, e.g. ec2.InstanceCollectionManager. It also
creates a new :py:class:`ResourceCollection` subclass which is used
by the new manager class.
:type resource_name: string
:param resource_name: Name of the resource to look up. For services,
this should match the ``service_name``.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
:type event_emitter: :py:class:`~botocore.hooks.HierarchialEmitter`
:param event_emitter: An event emitter
:rtype: Subclass of :py:class:`CollectionManager`
:return: The collection class.
"""
attrs = {}
collection_name = collection_model.name
# Create the batch actions for a collection
self._load_batch_actions(
attrs,
resource_name,
collection_model,
service_context.service_model,
event_emitter,
)
# Add the documentation to the collection class's methods
self._load_documented_collection_methods(
attrs=attrs,
resource_name=resource_name,
collection_model=collection_model,
service_model=service_context.service_model,
event_emitter=event_emitter,
base_class=ResourceCollection,
)
if service_context.service_name == resource_name:
cls_name = (
f'{service_context.service_name}.{collection_name}Collection'
)
else:
cls_name = f'{service_context.service_name}.{resource_name}.{collection_name}Collection'
collection_cls = type(str(cls_name), (ResourceCollection,), attrs)
# Add the documentation to the collection manager's methods
self._load_documented_collection_methods(
attrs=attrs,
resource_name=resource_name,
collection_model=collection_model,
service_model=service_context.service_model,
event_emitter=event_emitter,
base_class=CollectionManager,
)
attrs['_collection_cls'] = collection_cls
cls_name += 'Manager'
return type(str(cls_name), (CollectionManager,), attrs)
def _load_batch_actions(
self,
attrs,
resource_name,
collection_model,
service_model,
event_emitter,
):
"""
Batch actions on the collection become methods on both
the collection manager and iterators.
"""
for action_model in collection_model.batch_actions:
snake_cased = xform_name(action_model.name)
attrs[snake_cased] = self._create_batch_action(
resource_name,
snake_cased,
action_model,
collection_model,
service_model,
event_emitter,
)
def _load_documented_collection_methods(
factory_self,
attrs,
resource_name,
collection_model,
service_model,
event_emitter,
base_class,
):
# The base class already has these methods defined. However
# the docstrings are generic and not based for a particular service
# or resource. So we override these methods by proxying to the
# base class's builtin method and adding a docstring
# that pertains to the resource.
# A collection's all() method.
def all(self):
return base_class.all(self)
all.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
action_name='all',
event_emitter=event_emitter,
collection_model=collection_model,
service_model=service_model,
include_signature=False,
)
attrs['all'] = all
# The collection's filter() method.
def filter(self, **kwargs):
return base_class.filter(self, **kwargs)
filter.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
action_name='filter',
event_emitter=event_emitter,
collection_model=collection_model,
service_model=service_model,
include_signature=False,
)
attrs['filter'] = filter
# The collection's limit method.
def limit(self, count):
return base_class.limit(self, count)
limit.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
action_name='limit',
event_emitter=event_emitter,
collection_model=collection_model,
service_model=service_model,
include_signature=False,
)
attrs['limit'] = limit
# The collection's page_size method.
def page_size(self, count):
return base_class.page_size(self, count)
page_size.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
action_name='page_size',
event_emitter=event_emitter,
collection_model=collection_model,
service_model=service_model,
include_signature=False,
)
attrs['page_size'] = page_size
def _create_batch_action(
factory_self,
resource_name,
snake_cased,
action_model,
collection_model,
service_model,
event_emitter,
):
"""
Creates a new method which makes a batch operation request
to the underlying service API.
"""
action = BatchAction(action_model)
def batch_action(self, *args, **kwargs):
return action(self, *args, **kwargs)
batch_action.__name__ = str(snake_cased)
batch_action.__doc__ = docstring.BatchActionDocstring(
resource_name=resource_name,
event_emitter=event_emitter,
batch_action_model=action_model,
service_model=service_model,
collection_model=collection_model,
include_signature=False,
)
return batch_action
| CollectionFactory |
python | numpy__numpy | numpy/random/tests/test_generator_mt19937.py | {
"start": 109779,
"end": 118860
} | class ____:
def _create_arrays(self):
return np.array([2]), np.array([3]), np.array([4]), (1,)
def test_one_arg_funcs(self):
argOne, _, _, tgtShape = self._create_arrays()
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(argOne)
assert_equal(out.shape, tgtShape)
def test_two_arg_funcs(self):
argOne, argTwo, _, tgtShape = self._create_arrays()
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = argTwo
out = func(argOne, argTwo)
assert_equal(out.shape, tgtShape)
out = func(argOne[0], argTwo)
assert_equal(out.shape, tgtShape)
out = func(argOne, argTwo[0])
assert_equal(out.shape, tgtShape)
def test_integers(self, endpoint):
_, _, _, tgtShape = self._create_arrays()
itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, tgtShape)
def test_three_arg_funcs(self):
argOne, argTwo, argThree, tgtShape = self._create_arrays()
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(argOne, argTwo, argThree)
assert_equal(out.shape, tgtShape)
out = func(argOne[0], argTwo, argThree)
assert_equal(out.shape, tgtShape)
out = func(argOne, argTwo[0], argThree)
assert_equal(out.shape, tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
# Each config contains the initial seed, a number of raw steps
# the sha256 hashes of the initial and the final states' keys and
# the position of the initial and the final state.
# These were produced using the original C implementation.
seed = config["seed"]
steps = config["steps"]
mt19937 = MT19937(seed)
# Burn step
mt19937.random_raw(steps)
key = mt19937.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
assert sha256.hexdigest() == config["initial"]["key_sha256"]
jumped = mt19937.jumped()
key = jumped.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
assert sha256.hexdigest() == config["jumped"]["key_sha256"]
def test_broadcast_size_error():
mu = np.ones(3)
sigma = np.ones((4, 3))
size = (10, 4, 2)
assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=size)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(1, 3))
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(4, 1, 1))
# 1 arg
shape = np.ones((4, 3))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=size)
with pytest.raises(ValueError):
random.standard_gamma(shape, size=(3,))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=3)
# Check out
out = np.empty(size)
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
# 2 arg
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.multinomial([2, 2], [.3, .7], size=(2, 1))
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
c = random.chisquare(5, size=(5, 4, 3))
assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
def test_broadcast_size_scalar():
mu = np.ones(3)
sigma = np.ones(3)
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
def test_ragged_shuffle():
# GH 18142
seq = [[], [], 1]
gen = Generator(MT19937(0))
assert_no_warnings(gen.shuffle, seq)
assert seq == [1, [], []]
@pytest.mark.parametrize("high", [-2, [-2]])
@pytest.mark.parametrize("endpoint", [True, False])
def test_single_arg_integer_exception(high, endpoint):
# GH 14333
gen = Generator(MT19937(0))
msg = 'high < 0' if endpoint else 'high <= 0'
with pytest.raises(ValueError, match=msg):
gen.integers(high, endpoint=endpoint)
msg = 'low > high' if endpoint else 'low >= high'
with pytest.raises(ValueError, match=msg):
gen.integers(-1, high, endpoint=endpoint)
with pytest.raises(ValueError, match=msg):
gen.integers([-1], high, endpoint=endpoint)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
def test_c_contig_req_out(dtype):
# GH 18704
out = np.empty((2, 3), order="F", dtype=dtype)
shape = [1, 2, 3]
with pytest.raises(ValueError, match="Supplied output array"):
random.standard_gamma(shape, out=out, dtype=dtype)
with pytest.raises(ValueError, match="Supplied output array"):
random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dist", [random.standard_normal, random.random])
def test_contig_req_out(dist, order, dtype):
# GH 18704
out = np.empty((2, 3), dtype=dtype, order=order)
variates = dist(out=out, dtype=dtype)
assert variates is out
variates = dist(out=out, dtype=dtype, size=out.shape)
assert variates is out
def test_generator_ctor_old_style_pickle():
rg = np.random.Generator(np.random.PCG64DXSM(0))
rg.standard_normal(1)
# Directly call reduce which is used in pickling
ctor, (bit_gen, ), _ = rg.__reduce__()
# Simulate unpickling an old pickle that only has the name
assert bit_gen.__class__.__name__ == "PCG64DXSM"
print(ctor)
b = ctor(*("PCG64DXSM",))
print(b)
b.bit_generator.state = bit_gen.state
state_b = b.bit_generator.state
assert bit_gen.state == state_b
def test_pickle_preserves_seed_sequence():
# GH 26234
# Add explicit test that bit generators preserve seed sequences
import pickle
rg = np.random.Generator(np.random.PCG64DXSM(20240411))
ss = rg.bit_generator.seed_seq
rg_plk = pickle.loads(pickle.dumps(rg))
ss_plk = rg_plk.bit_generator.seed_seq
assert_equal(ss.state, ss_plk.state)
assert_equal(ss.pool, ss_plk.pool)
rg.bit_generator.seed_seq.spawn(10)
rg_plk = pickle.loads(pickle.dumps(rg))
ss_plk = rg_plk.bit_generator.seed_seq
assert_equal(ss.state, ss_plk.state)
@pytest.mark.parametrize("version", [121, 126])
def test_legacy_pickle(version):
# Pickling format was changes in 1.22.x and in 2.0.x
import gzip
import pickle
base_path = os.path.split(os.path.abspath(__file__))[0]
pkl_file = os.path.join(
base_path, "data", f"generator_pcg64_np{version}.pkl.gz"
)
with gzip.open(pkl_file) as gz:
rg = pickle.load(gz)
state = rg.bit_generator.state['state']
assert isinstance(rg, Generator)
assert isinstance(rg.bit_generator, np.random.PCG64)
assert state['state'] == 35399562948360463058890781895381311971
assert state['inc'] == 87136372517582989555478159403783844777
| TestSingleEltArrayInput |
python | google__jax | jax/experimental/source_mapper/common.py | {
"start": 1017,
"end": 1126
} | class ____(Protocol):
def __call__(self, work_dir, fn, f_args, f_kwargs, **kwargs) -> Any:
...
| CompileFn |
python | ray-project__ray | python/ray/tune/search/sample.py | {
"start": 882,
"end": 2294
} | class ____:
"""Thin wrapper to ensure backwards compatibility between
new and old numpy randomness generators.
"""
_rng = None
def __init__(
self,
generator_or_seed: Optional[
Union["np_random_generator", np.random.RandomState, int]
] = None,
):
if generator_or_seed is None or isinstance(
generator_or_seed, (np.random.RandomState, np_random_generator)
):
self._rng = generator_or_seed
elif LEGACY_RNG:
self._rng = np.random.RandomState(generator_or_seed)
else:
self._rng = np.random.default_rng(generator_or_seed)
@property
def legacy_rng(self) -> bool:
return not isinstance(self._rng, np_random_generator)
@property
def rng(self):
# don't set self._rng to np.random to avoid picking issues
return self._rng if self._rng is not None else np.random
def __getattr__(self, name: str) -> Any:
# https://numpy.org/doc/stable/reference/random/new-or-different.html
if self.legacy_rng:
if name == "integers":
name = "randint"
elif name == "random":
name = "rand"
return getattr(self.rng, name)
RandomState = Union[
None, _BackwardsCompatibleNumpyRng, np_random_generator, np.random.RandomState, int
]
@DeveloperAPI
| _BackwardsCompatibleNumpyRng |
python | rushter__MLAlgorithms | mla/ensemble/gbm.py | {
"start": 1632,
"end": 2008
} | class ____(Loss):
"""Logistic loss."""
def grad(self, actual, predicted):
return actual * expit(-actual * predicted)
def hess(self, actual, predicted):
expits = expit(predicted)
return expits * (1 - expits)
def transform(self, output):
# Apply logistic (sigmoid) function to the output
return expit(output)
| LogisticLoss |
python | ansible__ansible | lib/ansible/_internal/_ssh/_ssh_agent.py | {
"start": 10680,
"end": 11018
} | class ____(PrivateKeyMsg):
type: KeyAlgo
p: mpint
q: mpint
g: mpint
y: mpint
x: mpint
comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
constraints: constraints = dataclasses.field(default=constraints(b''))
@dataclasses.dataclass(order=True, slots=True)
| DSAPrivateKeyMsg |
python | getsentry__sentry | src/sentry/incidents/subscription_processor.py | {
"start": 1988,
"end": 12620
} | class ____:
"""
Class for processing subscription updates for workflow engine. Accepts a subscription
and then can process one or more updates via `process_update`.
"""
def __init__(self, subscription: QuerySubscription) -> None:
self.subscription = subscription
self.detector: Detector | None = None
self.last_update = to_datetime(0)
# We're doing workflow engine processing, we need the Detector.
try:
self.detector = Detector.objects.get(
data_sources__source_id=str(self.subscription.id),
data_sources__type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION,
)
self.last_update = get_detector_last_update(self.detector, self.subscription.project_id)
except Detector.DoesNotExist:
logger.info("Detector not found", extra={"subscription_id": self.subscription.id})
def get_crash_rate_alert_metrics_aggregation_value(
self, subscription_update: QuerySubscriptionUpdate
) -> float | None:
"""
Handles validation and extraction of Crash Rate Alerts subscription updates values over
metrics dataset.
The subscription update looks like
[
{'project_id': 8, 'tags[5]': 6, 'count': 2.0, 'crashed': 1.0}
]
- `count` represents sessions or users sessions that were started, hence to get the crash
free percentage, we would need to divide number of crashed sessions by that number,
and subtract that value from 1. This is also used when CRASH_RATE_ALERT_MINIMUM_THRESHOLD is
set in the sense that if the minimum threshold is greater than the session count,
then the update is dropped. If the minimum threshold is not set then the total sessions
count is just ignored
- `crashed` represents the total sessions or user counts that crashed.
"""
aggregation_value = get_crash_rate_alert_metrics_aggregation_value_helper(
subscription_update
)
return aggregation_value
def get_aggregation_value(
self, subscription_update: QuerySubscriptionUpdate, comparison_delta: int | None = None
) -> float | None:
if self.subscription.snuba_query.dataset == Dataset.Metrics.value:
aggregation_value = self.get_crash_rate_alert_metrics_aggregation_value(
subscription_update
)
else:
aggregation_value = get_comparison_aggregation_value(
subscription_update=subscription_update,
snuba_query=self.subscription.snuba_query,
organization_id=self.subscription.project.organization.id,
project_ids=[self.subscription.project_id],
comparison_delta=comparison_delta,
alert_rule_id=None,
)
return aggregation_value
def get_comparison_delta(self, detector: Detector | None) -> int | None:
if detector:
detector_cfg: MetricIssueDetectorConfig = detector.config
return detector_cfg.get("comparison_delta")
return None
def process_results_workflow_engine(
self,
detector: Detector,
subscription_update: QuerySubscriptionUpdate,
aggregation_value: float,
) -> list[tuple[Detector, dict[DetectorGroupKey, DetectorEvaluationResult]]]:
detector_cfg: MetricIssueDetectorConfig = detector.config
if detector_cfg["detection_type"] == AlertRuleDetectionType.DYNAMIC.value:
anomaly_detection_packet = AnomalyDetectionUpdate(
entity=subscription_update.get("entity", ""),
subscription_id=subscription_update["subscription_id"],
values={
"value": aggregation_value,
"source_id": str(self.subscription.id),
"subscription_id": subscription_update["subscription_id"],
"timestamp": self.last_update,
},
timestamp=self.last_update,
)
anomaly_detection_data_packet = DataPacket[AnomalyDetectionUpdate](
source_id=str(self.subscription.id), packet=anomaly_detection_packet
)
results = process_data_packet(
anomaly_detection_data_packet, DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION
)
else:
metric_packet = ProcessedSubscriptionUpdate(
entity=subscription_update.get("entity", ""),
subscription_id=subscription_update["subscription_id"],
values={"value": aggregation_value},
timestamp=self.last_update,
)
metric_data_packet = DataPacket[ProcessedSubscriptionUpdate](
source_id=str(self.subscription.id), packet=metric_packet
)
results = process_data_packet(metric_data_packet, DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION)
if features.has(
"organizations:workflow-engine-metric-alert-dual-processing-logs",
self.subscription.project.organization,
):
logger.info(
"incidents.workflow_engine.results",
extra={
"results": results,
"num_results": len(results),
"value": aggregation_value,
"detector_id": detector.id,
"subscription_update": subscription_update,
},
)
return results
def has_downgraded(self, dataset: str, organization: Organization) -> bool:
"""
Check if the organization has downgraded since the subscription was created, return early if True
"""
if dataset == "events" and not features.has("organizations:incidents", organization):
metrics.incr("incidents.alert_rules.ignore_update_missing_incidents")
return True
elif dataset == "transactions" and not features.has(
"organizations:performance-view", organization
):
metrics.incr("incidents.alert_rules.ignore_update_missing_incidents_performance")
return True
elif dataset == "generic_metrics" and not features.has(
"organizations:on-demand-metrics-extraction", organization
):
metrics.incr("incidents.alert_rules.ignore_update_missing_on_demand")
return True
return False
def process_update(self, subscription_update: QuerySubscriptionUpdate) -> bool:
"""
This is the core processing method utilized when Query Subscription Consumer fetches updates from kafka
"""
dataset = self.subscription.snuba_query.dataset
try:
# Check that the project exists
self.subscription.project
except Project.DoesNotExist:
metrics.incr("incidents.alert_rules.ignore_deleted_project")
return False
if self.subscription.project.status != ObjectStatus.ACTIVE:
metrics.incr("incidents.alert_rules.ignore_deleted_project")
return False
organization = self.subscription.project.organization
if self.has_downgraded(dataset, organization):
return False
if subscription_update["timestamp"] <= self.last_update:
metrics.incr("incidents.alert_rules.skipping_already_processed_update")
return False
self.last_update = subscription_update["timestamp"]
if (
len(subscription_update["values"]["data"]) > 1
and self.subscription.snuba_query.dataset != Dataset.Metrics.value
):
logger.warning(
"Subscription returned more than 1 row of data",
extra={
"subscription_id": self.subscription.id,
"dataset": self.subscription.snuba_query.dataset,
"snuba_subscription_id": self.subscription.subscription_id,
"result": subscription_update,
},
)
comparison_delta = None
with (
metrics.timer("incidents.alert_rules.process_update"),
track_memory_usage("incidents.alert_rules.process_update_memory"),
):
metrics.incr("incidents.alert_rules.process_update.start")
if self.detector is None:
logger.error(
"No detector found for subscription, skipping subscription processing",
extra={
"subscription_id": self.subscription.id,
"project_id": self.subscription.project.id,
},
)
return False
comparison_delta = self.get_comparison_delta(self.detector)
aggregation_value = self.get_aggregation_value(subscription_update, comparison_delta)
if aggregation_value is None:
metrics.incr("incidents.alert_rules.skipping_update_invalid_aggregation_value")
# We have an invalid aggregate, but we _did_ process the update, so we store
# last_update to reflect that and avoid reprocessing.
store_detector_last_update(
self.detector, self.subscription.project.id, self.last_update
)
return False
self.process_results_workflow_engine(
self.detector, subscription_update, aggregation_value
)
# Ensure that we have last_update stored for all Detector evaluations.
store_detector_last_update(
self.detector, self.subscription.project.id, self.last_update
)
return True
def build_detector_last_update_key(detector: Detector, project_id: int) -> str:
return f"detector:{detector.id}:project:{project_id}:last_update"
def get_detector_last_update(detector: Detector, project_id: int) -> datetime:
return to_datetime(
int(get_redis_client().get(build_detector_last_update_key(detector, project_id)) or "0")
)
def store_detector_last_update(detector: Detector, project_id: int, last_update: datetime) -> None:
get_redis_client().set(
build_detector_last_update_key(detector, project_id),
int(last_update.timestamp()),
ex=REDIS_TTL,
)
def get_redis_client() -> RetryingRedisCluster:
cluster_key = settings.SENTRY_INCIDENT_RULES_REDIS_CLUSTER
return redis.redis_clusters.get(cluster_key) # type: ignore[return-value]
| SubscriptionProcessor |
python | google__jax | jax/_src/pallas/triton/lowering.py | {
"start": 21841,
"end": 97688
} | class ____:
arg_classes: Sequence[jax.typing.DTypeLike]
op: Callable[..., ir.Value]
def matches(self, avals: Sequence[jax_core.ShapedArray]) -> bool:
if len(avals) != len(self.arg_classes):
return False
return all(
jnp.issubdtype(aval.dtype, arg_class)
for aval, arg_class in zip(avals, self.arg_classes)
)
def lower(self, ctx: LoweringRuleContext, *args: Sequence[ir.Value]):
[out_aval] = ctx.avals_out
bcast_args = []
for aval, arg in zip(ctx.avals_in, args):
bcast_args.append(_bcast_to(_ensure_ir_value(arg, aval), out_aval.shape))
return self.op(*args)
def _make_dispatch_table(
name: str, **tables: Sequence[_Extern | _Fallback]
) -> Callable[..., ir.Value]:
def inner(
ctx: LoweringRuleContext, *args: ir.Value, **_
) -> ir.Value:
table = tables[ctx.context.platform]
h = next((e for e in table if e.matches(ctx.avals_in)), None)
if h is None:
arg_aval_dtypes = tuple(aval.dtype for aval in ctx.avals_in)
raise NotImplementedError(
f"unsupported types for {name}: {arg_aval_dtypes}"
)
return h.lower(ctx, *args)
return inner
abs_dispatch_table = _make_dispatch_table(
"abs",
cuda=[
_Extern([jnp.int32], "__nv_abs", jnp.int32),
_Extern([jnp.int64], "__nv_llabs", jnp.int64),
_Extern([jnp.float32], "__nv_fabsf", jnp.float32),
_Extern([jnp.float64], "__nv_fabs", jnp.float64),
_Fallback([jnp.integer], math_dialect.absi),
_Fallback([jnp.floating], math_dialect.absf),
],
rocm=[
_Extern([jnp.float32], "__ocml_fabs_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_fabs_f64", jnp.float64),
_Fallback([jnp.integer], math_dialect.absi),
_Fallback([jnp.floating], math_dialect.absf),
],
)
ceil_dispatch_table = _make_dispatch_table(
"ceil",
cuda=[
_Extern([jnp.float32], "__nv_ceilf", jnp.float32),
_Extern([jnp.float64], "__nv_ceil", jnp.float64),
_Fallback([jnp.floating], math_dialect.ceil),
],
rocm=[
_Extern([jnp.float32], "__ocml_ceil_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_ceil_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.ceil),
],
)
floor_dispatch_table = _make_dispatch_table(
"floor",
cuda=[
_Extern([jnp.float32], "__nv_floorf", jnp.float32),
_Extern([jnp.float64], "__nv_floor", jnp.float64),
_Fallback([jnp.floating], math_dialect.floor),
],
rocm=[
_Extern([jnp.float32], "__ocml_floor_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_floor_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.floor),
],
)
exp_dispatch_table = _make_dispatch_table(
"exp",
cuda=[
_Extern([jnp.float32], "__nv_expf", jnp.float32),
_Extern([jnp.float64], "__nv_exp", jnp.float64),
_Fallback([jnp.floating], math_dialect.exp),
],
rocm=[
_Extern([jnp.float32], "__ocml_exp_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_exp_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.exp),
],
)
exp2_dispatch_table = _make_dispatch_table(
"exp2",
cuda=[
_Extern([jnp.float32], "__nv_exp2f", jnp.float32),
_Extern([jnp.float64], "__nv_exp2", jnp.float64),
_Fallback([jnp.floating], math_dialect.exp2),
],
rocm=[
_Extern([jnp.float32], "__ocml_exp2_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_exp2_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.exp2),
],
)
expm1_dispatch_table = _make_dispatch_table(
"expm1",
cuda=[
_Extern([jnp.float32], "__nv_expm1f", jnp.float32),
_Extern([jnp.float64], "__nv_expm1", jnp.float64),
_Fallback([jnp.floating], math_dialect.expm1),
],
rocm=[
_Extern([jnp.float32], "__ocml_expm1_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_expm1_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.expm1),
],
)
log_dispatch_table = _make_dispatch_table(
"log",
cuda=[
_Extern([jnp.float32], "__nv_logf", jnp.float32),
_Extern([jnp.float64], "__nv_log", jnp.float64),
_Fallback([jnp.floating], math_dialect.log),
],
rocm=[
_Extern([jnp.float32], "__ocml_log_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_log_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.log),
],
)
log1p_dispatch_table = _make_dispatch_table(
"log1p",
cuda=[
_Extern([jnp.float32], "__nv_log1pf", jnp.float32),
_Extern([jnp.float64], "__nv_log1p", jnp.float64),
_Fallback([jnp.floating], math_dialect.log1p),
],
rocm=[
_Extern([jnp.float32], "__ocml_log1p_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_log1p_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.log1p),
],
)
sqrt_dispatch_table = _make_dispatch_table(
"sqrt",
cuda=[
_Extern([jnp.float32], "__nv_sqrtf", jnp.float32),
_Extern([jnp.float64], "__nv_sqrt", jnp.float64),
_Fallback([jnp.floating], math_dialect.sqrt),
],
rocm=[
_Extern([jnp.float32], "__ocml_sqrt_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_sqrt_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.sqrt),
],
)
pow_dispatch_table = _make_dispatch_table(
"pow",
cuda=[
_Extern([jnp.float32, jnp.int32], "__nv_powif", jnp.float32),
_Extern([jnp.float64, jnp.int32], "__nv_powi", jnp.float64),
_Fallback(
[jnp.floating, jnp.integer],
math_dialect.fpowi
),
_Extern([jnp.float32, jnp.float32], "__nv_powf", jnp.float32),
_Extern([jnp.float64, jnp.float64], "__nv_pow", jnp.float64),
_Fallback(
[jnp.floating, jnp.floating],
math_dialect.powf
),
],
rocm=[
_Extern([jnp.float32, jnp.int32], "__ocml_pown_f32", jnp.float32),
_Extern([jnp.float64, jnp.int32], "__ocml_pown_f64", jnp.float64),
_Fallback(
[jnp.floating, jnp.integer],
math_dialect.fpowi
),
_Extern([jnp.float32, jnp.float32], "__ocml_pow_f32", jnp.float32),
_Extern([jnp.float64, jnp.float64], "__ocml_pow_f64", jnp.float64),
_Fallback(
[jnp.floating, jnp.floating],
math_dialect.powf
),
],
)
cbrt_dispatch_table = _make_dispatch_table(
"cbrt",
cuda=[
_Extern([jnp.float32], "__nv_cbrtf", jnp.float32),
_Extern([jnp.float64], "__nv_cbrt", jnp.float64),
_Fallback([jnp.floating], math_dialect.cbrt),
],
rocm=[
_Extern([jnp.float32], "__ocml_cbrt_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_cbrt_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.cbrt),
],
)
rsqrt_dispatch_table = _make_dispatch_table(
"rsqrt",
cuda=[
_Extern([jnp.float32], "__nv_rsqrtf", jnp.float32),
_Extern([jnp.float64], "__nv_rsqrt", jnp.float64),
_Fallback([jnp.floating], math_dialect.rsqrt),
],
rocm=[
_Extern([jnp.float32], "__ocml_rsqrt_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_rsqrt_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.rsqrt),
],
)
sin_dispatch_table = _make_dispatch_table(
"sin",
cuda=[
_Extern([jnp.float32], "__nv_sinf", jnp.float32),
_Extern([jnp.float64], "__nv_sin", jnp.float64),
_Fallback([jnp.floating], math_dialect.sin),
],
rocm=[
_Extern([jnp.float32], "__ocml_sin_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_sin_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.sin),
],
)
cos_dispatch_table = _make_dispatch_table(
"cos",
cuda=[
_Extern([jnp.float32], "__nv_cosf", jnp.float32),
_Extern([jnp.float64], "__nv_cos", jnp.float64),
_Fallback([jnp.floating], math_dialect.cos),
],
rocm=[
_Extern([jnp.float32], "__ocml_cos_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_cos_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.cos),
],
)
tan_dispatch_table = _make_dispatch_table(
"tan",
cuda=[
_Extern([jnp.float32], "__nv_tanf", jnp.float32),
_Extern([jnp.float64], "__nv_tan", jnp.float64),
_Fallback([jnp.floating], math_dialect.tan),
],
rocm=[
_Extern([jnp.float32], "__ocml_tan_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_tan_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.tan),
],
)
asin_dispatch_table = _make_dispatch_table(
"asin",
cuda=[
_Extern([jnp.float32], "__nv_asinf", jnp.float32),
_Extern([jnp.float64], "__nv_asin", jnp.float64),
_Fallback([jnp.floating], math_dialect.asin),
],
rocm=[
_Extern([jnp.float32], "__ocml_asin_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_asin_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.asin),
],
)
acos_dispatch_table = _make_dispatch_table(
"acos",
cuda=[
_Extern([jnp.float32], "__nv_acosf", jnp.float32),
_Extern([jnp.float64], "__nv_acos", jnp.float64),
_Fallback([jnp.floating], math_dialect.acos),
],
rocm=[
_Extern([jnp.float32], "__ocml_acos_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_acos_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.acos),
],
)
atan_dispatch_table = _make_dispatch_table(
"atan",
cuda=[
_Extern([jnp.float32], "__nv_atanf", jnp.float32),
_Extern([jnp.float64], "__nv_atan", jnp.float64),
_Fallback([jnp.floating], math_dialect.atan),
],
rocm=[
_Extern([jnp.float32], "__ocml_atan_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_atan_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.atan),
],
)
atan2_dispatch_table = _make_dispatch_table(
"atan2",
cuda=[
_Extern([jnp.float32, jnp.float32], "__nv_atan2f", jnp.float32),
_Extern([jnp.float64, jnp.float64], "__nv_atan2", jnp.float64),
_Fallback([jnp.floating, jnp.floating], math_dialect.atan2),
],
rocm=[
_Extern([jnp.float32, jnp.float32], "__ocml_atan2_f32", jnp.float32),
_Extern([jnp.float64, jnp.float64], "__ocml_atan2_f64", jnp.float64),
_Fallback([jnp.floating, jnp.floating], math_dialect.atan2),
],
)
sinh_dispatch_table = _make_dispatch_table(
"sinh",
cuda=[
_Extern([jnp.float32], "__nv_sinhf", jnp.float32),
_Extern([jnp.float64], "__nv_sinh", jnp.float64),
_Fallback([jnp.floating], math_dialect.sinh),
],
rocm=[
_Extern([jnp.float32], "__ocml_sinh_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_sinh_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.sinh),
],
)
cosh_dispatch_table = _make_dispatch_table(
"cosh",
cuda=[
_Extern([jnp.float32], "__nv_coshf", jnp.float32),
_Extern([jnp.float64], "__nv_cosh", jnp.float64),
_Fallback([jnp.floating], math_dialect.cosh),
],
rocm=[
_Extern([jnp.float32], "__ocml_cosh_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_cosh_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.cosh),
],
)
tanh_dispatch_table = _make_dispatch_table(
"tanh",
cuda=[
_Extern([jnp.float32], "__nv_tanhf", jnp.float32),
_Extern([jnp.float64], "__nv_tanh", jnp.float64),
_Fallback([jnp.floating], math_dialect.tanh),
],
rocm=[
_Extern([jnp.float32], "__ocml_tanh_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_tanh_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.tanh),
],
)
asinh_dispatch_table = _make_dispatch_table(
"asinh",
cuda=[
_Extern([jnp.float32], "__nv_asinhf", jnp.float32),
_Extern([jnp.float64], "__nv_asinh", jnp.float64),
_Fallback([jnp.floating], math_dialect.asinh),
],
rocm=[
_Extern([jnp.float32], "__ocml_asinh_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_asinh_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.asinh),
],
)
acosh_dispatch_table = _make_dispatch_table(
"acosh",
cuda=[
_Extern([jnp.float32], "__nv_acoshf", jnp.float32),
_Extern([jnp.float64], "__nv_acosh", jnp.float64),
_Fallback([jnp.floating], math_dialect.acosh),
],
rocm=[
_Extern([jnp.float32], "__ocml_acosh_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_acosh_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.acosh),
],
)
atanh_dispatch_table = _make_dispatch_table(
"atanh",
cuda=[
_Extern([jnp.float32], "__nv_atanhf", jnp.float32),
_Extern([jnp.float64], "__nv_atanh", jnp.float64),
_Fallback([jnp.floating], math_dialect.atanh),
],
rocm=[
_Extern([jnp.float32], "__ocml_atanh_f32", jnp.float32),
_Extern([jnp.float64], "__ocml_atanh_f64", jnp.float64),
_Fallback([jnp.floating], math_dialect.atanh),
],
)
population_count_dispatch_table = _make_dispatch_table(
"population_count",
cuda=[
_Extern([jnp.int32], "__nv_popc", jnp.int32),
_Extern([jnp.int64], "__nv_popcll", jnp.int32),
_Fallback([jnp.integer], math_dialect.ctpop),
],
rocm=[
_Extern([jnp.int32], "__ockl_popcount_u32", jnp.int32),
_Extern([jnp.int64], "__ockl_popcount_u64", jnp.int64),
_Fallback([jnp.integer], math_dialect.ctpop),
],
)
clz_dispatch_table = _make_dispatch_table(
"clz",
cuda=[
_Extern([jnp.int32], "__nv_clz", jnp.int32),
_Extern([jnp.int64], "__nv_clzll", jnp.int32),
_Fallback([jnp.integer], math_dialect.ctlz),
],
rocm=[
_Extern([jnp.int32], "__ockl_clz_u32", jnp.int32),
_Extern([jnp.int64], "__ockl_clz_u64", jnp.int64),
_Fallback([jnp.integer], math_dialect.ctlz),
],
)
nextafter_dispatch_table = _make_dispatch_table(
"nextafter",
cuda=[
_Extern([jnp.float32, jnp.float32], "__nv_nextafterf", jnp.float32),
_Extern([jnp.float64, jnp.float64], "__nv_nextafter", jnp.float64),
],
rocm=[
_Extern(
[jnp.float32, jnp.float32], "__ocml_nextafter_f32", jnp.float32
),
_Extern(
[jnp.float64, jnp.float64], "__ocml_nextafter_f64", jnp.float64
),
],
)
triton_lowering_rules.update({
lax.abs_p: abs_dispatch_table,
lax.neg_p: lambda ctx, x: _minus(x),
lax.ceil_p: ceil_dispatch_table,
lax.floor_p: floor_dispatch_table,
lax.exp_p: exp_dispatch_table,
lax.exp2_p: exp2_dispatch_table,
lax.expm1_p: expm1_dispatch_table,
lax.log_p: log_dispatch_table,
lax.log1p_p: log1p_dispatch_table,
lax.sqrt_p: sqrt_dispatch_table,
lax.square_p: lambda ctx, x: _mul(x, x),
lax.pow_p: pow_dispatch_table,
lax.cbrt_p: cbrt_dispatch_table,
lax.rsqrt_p: rsqrt_dispatch_table,
lax.sin_p: sin_dispatch_table,
lax.cos_p: cos_dispatch_table,
lax.tan_p: tan_dispatch_table,
lax.asin_p: asin_dispatch_table,
lax.acos_p: acos_dispatch_table,
lax.atan_p: atan_dispatch_table,
lax.atan2_p: atan2_dispatch_table,
lax.sinh_p: sinh_dispatch_table,
lax.cosh_p: cosh_dispatch_table,
lax.tanh_p: tanh_dispatch_table,
lax.asinh_p: asinh_dispatch_table,
lax.acosh_p: acosh_dispatch_table,
lax.atanh_p: atanh_dispatch_table,
lax.population_count_p: population_count_dispatch_table,
lax.clz_p: clz_dispatch_table,
lax.nextafter_p: nextafter_dispatch_table,
})
def _minus(x: ir.Value) -> ir.Value:
if tt_dialect.PointerType.isinstance(_element_type(x.type)):
raise NotImplementedError(f"unsupported type: {x.type}")
return _sub(_zeros_like(x), x)
def _add(x: ir.Value, y: ir.Value):
x_element_type = _element_type(x.type)
y_element_type = _element_type(y.type)
if tt_dialect.PointerType.isinstance(x_element_type):
assert not tt_dialect.PointerType.isinstance(y_element_type)
return tt_dialect.addptr(x.type, x, y)
if tt_dialect.PointerType.isinstance(y_element_type):
return tt_dialect.addptr(y.type, y, x)
assert x.type == y.type, (str(x.type), str(y.type))
if isinstance(x_element_type, ir.IntegerType):
return arith_dialect.addi(x, y)
if isinstance(x_element_type, ir.FloatType):
return arith_dialect.addf(x, y)
raise NotImplementedError(f"unsupported dtypes: {x.type} and {y.type}")
def _sub(x: ir.Value, y: ir.Value) -> ir.Value:
x_element_type = _element_type(x.type)
y_element_type = _element_type(y.type)
if tt_dialect.PointerType.isinstance(x_element_type):
return tt_dialect.addptr(x.type, x, _minus(y))
elif not tt_dialect.PointerType.isinstance(y_element_type):
assert x.type == y.type, (str(x.type), str(y.type))
if isinstance(x_element_type, ir.IntegerType):
return arith_dialect.subi(x, y)
elif isinstance(x_element_type, ir.FloatType):
return arith_dialect.subf(x, y)
raise NotImplementedError(f"unsupported dtype: {y.type}")
def _mul(x: ir.Value, y: ir.Value) -> ir.Value:
assert x.type == y.type, (str(x.type), str(y.type))
x_element_type = _element_type(x.type)
if isinstance(x_element_type, ir.IntegerType):
return arith_dialect.muli(x, y)
elif isinstance(x_element_type, ir.FloatType):
return arith_dialect.mulf(x, y)
raise NotImplementedError(f"unsupported types: {x.type} and {y.type}")
def _floordiv(x: ir.Value, y: ir.Value, *, signed: bool) -> ir.Value:
assert x.type == y.type, (str(x.type), str(y.type))
x_element_type = _element_type(x.type)
if isinstance(x_element_type, (ir.F32Type, ir.F64Type)):
return arith_dialect.divf(x, y)
if not isinstance(x_element_type, ir.IntegerType):
raise NotImplementedError(f"unsupported types: {x.type} and {y.type}")
if signed:
return arith_dialect.divsi(x, y)
else:
return arith_dialect.divui(x, y)
def _truediv(x: ir.Value, y: ir.Value, *, signed: bool) -> ir.Value:
assert x.type == y.type, (str(x.type), str(y.type))
x_element_type = _element_type(x.type)
if isinstance(x_element_type, ir.IntegerType):
x_element_type = ir.F32Type.get()
x = _int_float_cast(x, x_element_type, signed=signed)
y = _int_float_cast(y, x_element_type, signed=signed)
if isinstance(x_element_type, (ir.F32Type, ir.F64Type)):
return arith_dialect.divf(x, y)
raise NotImplementedError(f"unsupported types: {x.type} and {y.type}")
def _mod(x: ir.Value, y: ir.Value, *, signed: bool) -> ir.Value:
assert x.type == y.type, (str(x.type), str(y.type))
x_element_type = _element_type(x.type)
if isinstance(x_element_type, ir.FloatType):
return arith_dialect.remf(x, y)
if not isinstance(x_element_type, ir.IntegerType):
raise NotImplementedError(f"unsupported types: {x.type} and {y.type}")
if signed:
return arith_dialect.remsi(x, y)
else:
return arith_dialect.remui(x, y)
def _cmp(
x: ir.Value,
y: ir.Value,
si_pred: arith_dialect.CmpIPredicate,
ui_pred: arith_dialect.CmpIPredicate,
f_pred: arith_dialect.CmpFPredicate,
*,
signed: bool,
) -> ir.Value:
assert x.type == y.type, (str(x.type), str(y.type))
x_element_type = _element_type(x.type)
if isinstance(x_element_type, ir.IntegerType):
return arith_dialect.cmpi(si_pred if signed else ui_pred, x, y)
elif isinstance(x_element_type, ir.FloatType):
return arith_dialect.cmpf(f_pred, x, y)
else:
raise NotImplementedError(f"unsupported types: {x.type} and {y.type}")
_equal = functools.partial(
_cmp,
si_pred=arith_dialect.CmpIPredicate.eq,
ui_pred=arith_dialect.CmpIPredicate.eq,
f_pred=arith_dialect.CmpFPredicate.OEQ,
)
_not_equal = functools.partial(
_cmp,
si_pred=arith_dialect.CmpIPredicate.ne,
ui_pred=arith_dialect.CmpIPredicate.ne,
f_pred=arith_dialect.CmpFPredicate.UNE,
)
_less_than = functools.partial(
_cmp,
si_pred=arith_dialect.CmpIPredicate.slt,
ui_pred=arith_dialect.CmpIPredicate.ult,
f_pred=arith_dialect.CmpFPredicate.OLT,
)
_less_equal = functools.partial(
_cmp,
si_pred=arith_dialect.CmpIPredicate.sle,
ui_pred=arith_dialect.CmpIPredicate.ule,
f_pred=arith_dialect.CmpFPredicate.OLE,
)
_greater_than = functools.partial(
_cmp,
si_pred=arith_dialect.CmpIPredicate.sgt,
ui_pred=arith_dialect.CmpIPredicate.ugt,
f_pred=arith_dialect.CmpFPredicate.OGT,
)
_greater_equal = functools.partial(
_cmp,
si_pred=arith_dialect.CmpIPredicate.sge,
ui_pred=arith_dialect.CmpIPredicate.uge,
f_pred=arith_dialect.CmpFPredicate.OGE,
)
def _is_nan(x: ir.Value) -> ir.Value:
return arith_dialect.cmpf(arith_dialect.CmpFPredicate.UNO, x, x)
_JAX_TO_TRITON_BINARY = {
lax.add_p: _add,
lax.sub_p: _sub,
lax.mul_p: _mul,
lax.and_p: arith_dialect.andi,
lax.or_p: arith_dialect.ori,
lax.xor_p: arith_dialect.xori,
lax.shift_left_p: arith_dialect.shli,
lax.shift_right_arithmetic_p: arith_dialect.shrsi,
lax.shift_right_logical_p: arith_dialect.shrui,
ad_util.add_any_p: _add,
}
for prim, fn in _JAX_TO_TRITON_BINARY.items():
def signless_rule(ctx: LoweringRuleContext, x, y, fn=fn):
x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
return fn(x, y)
triton_lowering_rules[prim] = signless_rule
_JAX_TO_TRITON_SIGNED_BINARY = {
lax.rem_p: _mod,
lax.eq_p: _equal,
lax.ne_p: _not_equal,
lax.gt_p: _greater_than,
lax.ge_p: _greater_equal,
lax.lt_p: _less_than,
lax.le_p: _less_equal,
}
for prim, fn in _JAX_TO_TRITON_SIGNED_BINARY.items():
def signed_rule(ctx: LoweringRuleContext, x, y, fn=fn):
x_aval, _ = ctx.avals_in
x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
return fn(x, y, signed=jnp.issubdtype(x_aval.dtype, jnp.signedinteger))
triton_lowering_rules[prim] = signed_rule
@register_lowering(debugging.debug_print_p)
def debug_print_lowering_rule(
ctx: LoweringRuleContext,
*args: ir.Value,
fmt: str,
ordered,
partitioned,
in_tree,
static_args,
np_printoptions,
has_placeholders,
logging_record,
):
del partitioned, np_printoptions
if ordered:
raise NotImplementedError("Ordered debug_print is not supported on Pallas.")
if has_placeholders:
raise ValueError(
"pl.debug_print() does not support placeholders when lowering to Triton"
)
args, kwargs = debugging.merge_callback_args(in_tree, args, static_args)
if kwargs:
raise ValueError(
"Only positional arguments are supported by debug_print on Pallas."
)
tt_dialect.print_(
f" {fmt} ",
hex=False,
args=args,
is_signed=ir.DenseI32ArrayAttr.get([
jnp.issubdtype(aval.dtype, jnp.signedinteger) for aval in ctx.avals_in
]),
)
return ()
def _set_attr(v: ir.Value, name: str, attr: ir.Attribute) -> None:
if not ir.BlockArgument.isinstance(v):
v.owner.attributes[name] = attr
return
arg = ir.BlockArgument(v)
name += f"_arg{arg.arg_number}"
owner = arg.owner
is_entry = owner.region.blocks[0] == owner
if not is_entry:
return
if (op := owner.owner.operation) and not isinstance(op, tt_dialect.FuncOp):
op.attributes[name] = attr
@register_lowering(primitives.multiple_of_p)
def _multiple_of_rule(ctx: LoweringRuleContext, x, values: Sequence[int]):
[x_aval] = ctx.avals_in
assert max(1, len(x_aval.shape)) == len(values)
_set_attr(
x,
"tt.divisibility",
ir.DenseIntElementsAttr.get(np.asarray(values, dtype=np.int32)),
)
return x
@register_lowering(primitives.max_contiguous_p)
def _max_contiguous_rule(ctx: LoweringRuleContext, x, values: Sequence[int]):
[x_aval] = ctx.avals_in
assert len(x_aval.shape) == len(values)
_set_attr(
x,
"tt.contiguity",
ir.DenseIntElementsAttr.get(np.asarray(values, dtype=np.int32)),
)
return x
@register_lowering(sp.broadcast_to_p)
def _broadcast_to_rule(ctx: LoweringRuleContext, x, shape: Sequence[int]):
(x_aval,) = ctx.avals_in
return _bcast_to(_ensure_ir_value(x, x_aval), shape)
@register_lowering(lax.integer_pow_p)
def _integer_pow_rule(ctx: LoweringRuleContext, x, *, y: int):
if y == 0:
return _ones_like(x)
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
y, mod = divmod(y, 2)
if mod:
acc = x if acc is None else _mul(acc, x)
if y > 0:
x = _mul(x, x)
assert acc is not None
[x_aval] = ctx.avals_in
[out_aval] = ctx.avals_out
acc = _cast(acc, x_aval.dtype, out_aval.dtype)
if is_reciprocal:
signed = jnp.issubdtype(out_aval.dtype, jnp.signedinteger)
return _truediv(_ones_like(acc), acc, signed=signed)
else:
return acc
_JAX_FN_MAPPING = {
lax.clamp_p: lambda min, a, max: jnp.minimum(jnp.maximum(min, a), max),
lax.logistic_p: lambda a, accuracy: 1 / (1 + jnp.exp(-a)),
}
for prim, fn in _JAX_FN_MAPPING.items():
triton_lowering_rules[prim] = lower_fun(fn, multiple_results=False)
@register_lowering(lax.min_p)
def _min_lowering_rule(ctx: LoweringRuleContext, x, y):
  """Lowers elementwise minimum for float and integer dtypes."""
  # TODO(slebedev): Consider allowing customizing nan behavior.
  x_aval, y_aval = ctx.avals_in
  x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
  dtype = x_aval.dtype
  if jnp.issubdtype(dtype, jnp.floating):
    # TODO(slebedev): Triton promotes bfloat16 to float32 and back here.
    return arith_dialect.minnumf(x, y)
  if jnp.issubdtype(dtype, jnp.integer):
    if jnp.issubdtype(dtype, jnp.signedinteger):
      return arith_dialect.minsi(x, y)
    return arith_dialect.minui(x, y)
  raise NotImplementedError(
      f"unsupported dtypes: {x_aval.dtype} and {y_aval.dtype}"
  )
@register_lowering(lax.max_p)
def _max_lowering_rule(ctx: LoweringRuleContext, x, y):
  """Lowers elementwise maximum for float and integer dtypes."""
  # TODO(slebedev): Consider allowing customizing nan behavior.
  x_aval, y_aval = ctx.avals_in
  x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
  dtype = x_aval.dtype
  if jnp.issubdtype(dtype, jnp.floating):
    # TODO(slebedev): Triton promotes bfloat16 to float32 and back here.
    return arith_dialect.maxnumf(x, y)
  if jnp.issubdtype(dtype, jnp.integer):
    if jnp.issubdtype(dtype, jnp.signedinteger):
      return arith_dialect.maxsi(x, y)
    return arith_dialect.maxui(x, y)
  raise NotImplementedError(
      f"unsupported dtypes: {x_aval.dtype} and {y_aval.dtype}"
  )
@register_lowering(lax.div_p)
def _div_lowering_rule(ctx: LoweringRuleContext, x, y):
  """Lowers division: true division for floats, floor division otherwise."""
  x_aval, y_aval = ctx.avals_in
  x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
  avals = (x_aval, y_aval)
  signed = any(jnp.issubdtype(a.dtype, jnp.signedinteger) for a in avals)
  if any(jnp.issubdtype(a.dtype, np.floating) for a in avals):
    return _truediv(x, y, signed=signed)
  return _floordiv(x, y, signed=signed)
# ``sign`` and ``erf_inv`` reuse the generic pallas helper implementations,
# traced through ``lower_fun`` rather than lowered by hand.
register_lowering(lax.sign_p)(
    lower_fun(pallas_utils.sign_lowering_helper, multiple_results=False)
)
register_lowering(lax.erf_inv_p)(
    lower_fun(pallas_utils.erf_inv_lowering_helper, multiple_results=False)
)
@register_lowering(lax.iota_p)
def _iota_lowering_rule(ctx: LoweringRuleContext, *, dtype, shape, dimension,
                        sharding):
  """Lowers iota: a 0..n-1 range along ``dimension`` broadcast to ``shape``."""
  out = _cast(_make_range(0, shape[dimension]), jnp.int32, dtype)
  # Insert size-1 axes everywhere except the iota dimension, then broadcast.
  for axis in range(len(shape)):
    if axis != dimension:
      out = _expand_dims(out, axis)
  return _bcast_to(out, shape)
def _element_type(t: ir.Type) -> ir.Type:
  """Returns the element type of a ranked tensor, or ``t`` itself for scalars."""
  if not ir.RankedTensorType.isinstance(t):
    return t
  return ir.RankedTensorType(t).element_type
def _make_range(start: int, end: int) -> ir.Value:
if end <= start:
raise ValueError(
f"end must be greater than start, but got: {end} <= {start}"
)
if max(start, end) >= 2**32:
raise ValueError("start and end must fit in int32")
return tt_dialect.make_range(
ir.RankedTensorType.get([end - start], ir.IntegerType.get_signless(32)),
start,
end,
)
def _full(t: ir.Type, v: object) -> ir.Value:
  """Returns a constant of type ``t`` holding ``v`` (splatted for tensors)."""
  element_type = _element_type(t)
  if isinstance(element_type, ir.IntegerType):
    result = arith_dialect.constant(element_type, int(v))
  elif isinstance(element_type, ir.FloatType):
    result = arith_dialect.constant(element_type, float(v))
  else:
    raise NotImplementedError
  if ir.RankedTensorType.isinstance(t):
    # Broadcast the scalar constant to the requested tensor type.
    return tt_dialect.splat(t, result)
  else:
    return result
def _zeros(t: ir.Type) -> ir.Value:
  """Returns a zero constant of type ``t``."""
  return _full(t, 0)
def _zeros_like(x: ir.Value) -> ir.Value:
  """Returns a zero constant with the same type as ``x``."""
  return _full(x.type, 0)
def _ones(t: ir.Type) -> ir.Value:
  """Returns a one constant of type ``t``."""
  return _full(t, 1)
def _ones_like(x: ir.Value) -> ir.Value:
  """Returns a one constant with the same type as ``x``."""
  return _full(x.type, 1)
def _splat(x: ir.Value, shape: Sequence[int]) -> ir.Value:
  """Broadcasts scalar ``x`` to a tensor of ``shape`` (identity for ``[]``)."""
  # NOTE: annotation fixed from ``ir.value`` (no such attribute) to ``ir.Value``.
  if ir.RankedTensorType.isinstance(x.type):
    raise TypeError("cannot splat a tensor")
  if not shape:
    return x
  return tt_dialect.splat(ir.RankedTensorType.get(shape, x.type), x)
def _expand_dims(x: ir.Value, axis: int) -> ir.Value:
  """Inserts a size-1 dimension at ``axis``.

  Ranked tensors go through ``tt.expand_dims``; scalars are splatted to a
  rank-1 tensor.
  """
  if not ir.RankedTensorType.isinstance(x.type):
    # ``x`` is a scalar here, so its conceptual shape is empty. The previous
    # code cast the unranked type to RankedTensorType, which always fails on
    # this branch.
    shape: list[int] = []
    shape.insert(axis, 1)
    return _splat(x, shape)
  return tt_dialect.expand_dims(x, axis)
def _float_float_cast(src: ir.Value, dst_type: ir.Type) -> ir.Value:
  """Casts between float element types (widen/narrow, fp8 via tt.fp_to_fp)."""
  src_element_type = ir.FloatType(_element_type(src.type))
  dst_element_type = ir.FloatType(_element_type(dst_type))
  if src_element_type.width == 8 or dst_element_type.width == 8:
    # fp8 casts use Triton's fp_to_fp; rounding is needed only when narrowing.
    rounding = (
        tt_dialect.RoundingMode.RTNE if src_element_type.width > 8 else None
    )
    return tt_dialect.fp_to_fp(dst_type, src, rounding=rounding)
  if src_element_type.width > dst_element_type.width:
    return arith_dialect.truncf(dst_type, src)
  elif src_element_type.width < dst_element_type.width:
    return arith_dialect.extf(dst_type, src)
  else:
    # Same width but distinct types (e.g. f16 <-> bf16) is not handled here.
    raise NotImplementedError
def _int_int_cast(src: ir.Value, dst_type: ir.Type, signed: bool) -> ir.Value:
  """Casts between integer element types; an i1 target becomes ``src != 0``."""
  src_element_type = ir.IntegerType(_element_type(src.type))
  dst_element_type = ir.IntegerType(_element_type(dst_type))
  assert src_element_type != dst_element_type
  if dst_element_type.width == 1:
    # Casting to bool: any nonzero value maps to True.
    return _not_equal(src, _zeros_like(src), signed=signed)
  if src_element_type.width == dst_element_type.width:
    return arith_dialect.bitcast(dst_type, src)
  elif src_element_type.width > dst_element_type.width:
    return arith_dialect.trunci(dst_type, src)
  elif signed and src_element_type.width != 1:
    # Sign-extend, except when widening an i1 (bools are never negative).
    return arith_dialect.extsi(dst_type, src)
  else:
    return arith_dialect.extui(dst_type, src)
def _float_int_cast(
    src: ir.Value, dst_type: ir.Type, *, signed: bool
) -> ir.Value:
  """Casts a float value to an integer type, clamping like JAX/XLA.

  An i1 destination is treated as a boolean (``src != 0``).
  """
  src_element_type = _element_type(src.type)
  if not isinstance(
      src_element_type, (ir.BF16Type, ir.F16Type, ir.F32Type, ir.F64Type)
  ):
    # Fixed typo in the error message ("tp" -> "to").
    raise NotImplementedError(f"cannot cast {src} to {dst_type}")
  dst_element_type = ir.IntegerType(_element_type(dst_type))
  if dst_element_type.width == 1:
    return _not_equal(src, _zeros_like(src), signed=signed)
  else:
    # We clamp the float value to the min/max integer destination value
    # in order to match JAX/XLA casting behavior. Note that this differs
    # from numpy casting behavior.
    if signed:
      maxint = 2**(dst_element_type.width-1) - 1
      minint = -2**(dst_element_type.width-1)
    else:
      maxint = 2**dst_element_type.width - 1
      minint = 0
    src = arith_dialect.minimumf(src, _full(src.type, maxint))
    src = arith_dialect.maximumf(src, _full(src.type, minint))
    if signed:
      return arith_dialect.fptosi(dst_type, src)
    else:
      return arith_dialect.fptoui(dst_type, src)
def _int_float_cast(
    src: ir.Value, dst_type: ir.Type, *, signed: bool
) -> ir.Value:
  """Casts an integer value to a float type (i1 and unsigned use uitofp)."""
  src_element_type = ir.IntegerType(_element_type(src.type))
  dst_element_type = _element_type(dst_type)
  if not isinstance(
      dst_element_type, (ir.BF16Type, ir.F16Type, ir.F32Type, ir.F64Type)
  ):
    # Fixed typo in the error message ("tp" -> "to").
    raise NotImplementedError(f"cannot cast {src} to {dst_type}")
  if src_element_type.width == 1 or not signed:
    return arith_dialect.uitofp(dst_type, src)
  else:
    return arith_dialect.sitofp(dst_type, src)
def _cast(
    src: ir.Value,
    src_type: jax.typing.DTypeLike,
    dst_type: jax.typing.DTypeLike,
) -> ir.Value:
  """Casts ``src`` between JAX dtypes, tracking signedness on both sides."""
  src_signed = jnp.issubdtype(src_type, jnp.signedinteger)
  dst_signed = jnp.issubdtype(dst_type, jnp.signedinteger)
  return _ir_cast(
      src,
      _dtype_to_ir_type(dst_type),
      signed=src_signed,
      dst_signed=dst_signed,
  )
def _ir_cast(src: ir.Value, dst_type: ir.Type, *,
             signed: bool, dst_signed: bool = False) -> ir.Value:
  """Casts ``src`` to ``dst_type``, dispatching on element-type categories.

  ``signed``/``dst_signed`` describe the integer signedness of the source and
  destination, since MLIR integer types are signless.
  """
  # If the destination is given as an element type but the source is a tensor,
  # lift the destination to a tensor type with the same shape/encoding.
  if ir.RankedTensorType.isinstance(
      src.type
  ) and not ir.RankedTensorType.isinstance(dst_type):
    src_type = ir.RankedTensorType(src.type)
    dst_type = ir.RankedTensorType.get(
        src_type.shape,
        dst_type,
        src_type.encoding,
    )
  if src.type == dst_type:
    return src
  src_element_type = _element_type(src.type)
  dst_element_type = _element_type(dst_type)
  if isinstance(src_element_type, ir.Float8E4M3FNUZType) or isinstance(
      dst_element_type, ir.Float8E4M3FNUZType
  ):
    # TODO(slebedev): Check the CUDA version and raise conditionally.
    raise NotImplementedError("cannot cast from or to float8_e4m3fnuz")
  # f16/bf16 go through f32 unless the destination is already f32.
  if isinstance(src_element_type, (ir.F16Type, ir.BF16Type)) and not isinstance(
      dst_element_type, ir.F32Type
  ):
    return _ir_cast(
        _ir_cast(src, ir.F32Type.get(), signed=False),
        dst_type, signed=False, dst_signed=dst_signed
    )
  if isinstance(src_element_type, ir.FloatType) and isinstance(
      dst_element_type, ir.FloatType
  ):
    return _float_float_cast(src, dst_type)
  if isinstance(src_element_type, ir.IntegerType) and isinstance(
      dst_element_type, ir.IntegerType
  ):
    return _int_int_cast(src, dst_type, signed=signed)
  if isinstance(src_element_type, ir.FloatType) and isinstance(
      dst_element_type, ir.IntegerType
  ):
    return _float_int_cast(src, dst_type, signed=dst_signed)
  if isinstance(src_element_type, ir.IntegerType) and isinstance(
      dst_element_type, ir.FloatType
  ):
    return _int_float_cast(src, dst_type, signed=signed)
  # Pointer <-> integer and pointer <-> pointer conversions.
  if tt_dialect.PointerType.isinstance(src_element_type) and isinstance(
      dst_element_type, ir.IntegerType
  ):
    if dst_element_type.width == 64:
      return tt_dialect.ptr_to_int(dst_type, src)
    elif dst_element_type.width == 1:
      # ptr -> bool: compare the 64-bit address against zero.
      x = _ir_cast(src, ir.IntegerType.get_signless(64), signed=signed)
      zero = _zeros_like(x)
      return _ir_cast(_not_equal(x, zero, signed=signed), dst_type, signed=signed)
  if isinstance(
      src_element_type, ir.IntegerType
  ) and tt_dialect.PointerType.isinstance(dst_element_type):
    return tt_dialect.int_to_ptr(dst_type, src)
  if tt_dialect.PointerType.isinstance(
      src_element_type
  ) and tt_dialect.PointerType.isinstance(dst_element_type):
    return tt_dialect.bitcast(dst_type, src)
  raise NotImplementedError(f"cannot cast {src} to {dst_type}")
@register_lowering(lax.convert_element_type_p)
def _convert_element_type_lowering_rule(
    ctx: LoweringRuleContext, x, *, new_dtype, weak_type, sharding
):
  """Lowers ``convert_element_type``; a same-dtype conversion is a no-op."""
  [x_aval] = ctx.avals_in
  value = _ensure_ir_value(x, x_aval)
  if new_dtype == x_aval.dtype:
    return value
  return _cast(value, x_aval.dtype, new_dtype)
@register_lowering(lax.select_n_p)
def select_n_lowering_rule(ctx: LoweringRuleContext, pred, x, y):
  """Lowers two-case ``select_n`` to ``arith.select``."""
  pred_aval, first_aval, second_aval = ctx.avals_in
  [out_aval] = ctx.avals_out
  pred, x = _bcast(pred, x, pred_aval, first_aval, out_aval)
  pred, y = _bcast(pred, y, pred_aval, second_aval, out_aval)
  # ``select_n`` picks case 0 (``x``) when the predicate is false, while
  # ``arith.select`` picks its first operand when true — hence the swap.
  return arith_dialect.select(pred, y, x)
@register_lowering(lax.broadcast_in_dim_p)
def _broadcast_in_dim_lowering_rule(
    ctx: LoweringRuleContext, x, *, broadcast_dimensions, shape, sharding
):
  """Inserts the missing size-1 axes of ``x``, then broadcasts to ``shape``."""
  del sharding
  x = _ensure_ir_value(x, *ctx.avals_in)
  if not ir.RankedTensorType.isinstance(x.type):
    # Scalars can be broadcast directly.
    return _bcast_to(x, shape)
  kept_dims = set(broadcast_dimensions)
  for dim in range(len(shape)):
    if dim not in kept_dims:
      x = _expand_dims(x, dim)
  return _bcast_to(x, shape)
@register_lowering(lax.squeeze_p)
def _squeeze_lowering_rule(ctx: LoweringRuleContext, a, *, dimensions):
  """Lowers squeeze by delegating to the reshape rule."""
  del dimensions  # The output aval already carries the squeezed shape.
  return _reshape_lowering_rule(
      ctx, a, new_sizes=None, dimensions=None, sharding=None
  )
@register_lowering(lax.reshape_p)
def _reshape_lowering_rule(
    ctx: LoweringRuleContext, a, *, new_sizes, dimensions, sharding,
):
  """Lowers reshape; the ``dimensions`` permutation is unsupported."""
  del new_sizes  # Unused: the output aval carries the target shape.
  if dimensions is not None:
    # Bug fix: this previously *returned* the ValueError object instead of
    # raising it, so unsupported calls silently succeeded.
    raise ValueError("`dimensions` is not supported.")
  a = _ensure_ir_value(a, *ctx.avals_in)
  [a_aval] = ctx.avals_in
  [out_aval] = ctx.avals_out
  # Triton Reshape doesn't support scalar result types (only 0d tensors), so
  # reduce over all axes instead (the input has a single element here).
  if out_aval.ndim == 0:
    return _reduce_lowering(jnp.add, ctx, a, axes=tuple(range(a_aval.ndim)))
  return _reshape(a, out_aval.shape)
def _reshape(a: ir.Value, shape: Sequence[int]) -> ir.Value:
  """Reshapes ``a`` to ``shape``; scalars are splatted (all dims must be 1)."""
  if not ir.RankedTensorType.isinstance(a.type):
    assert all(dim_size == 1 for dim_size in shape)
    return _splat(a, shape)
  old_type = ir.RankedTensorType(a.type)
  new_type = ir.RankedTensorType.get(
      shape, old_type.element_type, old_type.encoding
  )
  return tt_dialect.reshape(new_type, a, allow_reorder=False)
def get_join_type(old_type: ir.RankedTensorType):
  """Returns ``old_type`` with a trailing dimension of size 2 appended."""
  joined_shape = [*old_type.shape, 2]
  return ir.RankedTensorType.get(
      joined_shape, old_type.element_type, old_type.encoding
  )
@register_lowering(lax.concatenate_p)
def _concatenate_lowering_rule(ctx: LoweringRuleContext, *args, dimension):
  """Lowers a 2-argument concat of ``[..., 1]`` operands via ``tt.join``."""
  if len(args) != 2:
    raise NotImplementedError("Only 2-argument concatenate is supported.")
  x_aval, y_aval = ctx.avals_in
  x, y = args
  if dimension != x_aval.ndim-1:
    raise NotImplementedError(
        "Only concatenate along the last dimension is supported."
    )
  if x_aval.shape[-1] != 1 or y_aval.shape[-1] != 1:
    raise NotImplementedError(
        "Only arguments with shape [..., 1] are supported."
    )
  # Drop the trailing size-1 axis, then join re-adds it holding both inputs.
  lhs = _reshape(x, x_aval.shape[:-1])
  rhs = _reshape(y, y_aval.shape[:-1])
  ret_type = get_join_type(ir.RankedTensorType(rhs.type))
  return tt_dialect.join(ret_type, lhs, rhs)
@register_lowering(lax.split_p)
def _split_lowering_rule(ctx: LoweringRuleContext, x, *, sizes, axis):
  """Lowers ``lax.split`` via repeated binary ``tt.split``.

  Only a power-of-2 number of equal-sized parts is supported.
  """
  # (A stray dead ``pass`` statement was removed here.)
  # TODO(cjfj): Add support for larger powers of 2.
  num_parts = len(sizes)
  if num_parts != pallas_utils.next_power_of_2(num_parts):
    raise NotImplementedError("Only power-of-2 num parts supported.")
  if any(size != sizes[0] for size in sizes):
    raise NotImplementedError("Only equal-sized splits are supported.")

  def split_into_2(x):
    # Reshape [..., d, ...] -> [..., 2, d // 2, ...], move the 2 to the last
    # axis, then split it off.
    shape = ir.RankedTensorType(x.type).shape
    x = _reshape(x, shape[:axis] + [2, shape[axis] // 2] + shape[axis + 1 :])
    permutation = tuple(d for d in range(len(shape) + 1) if d != axis) + (axis,)
    return tuple(tt_dialect.split(tt_dialect.trans(x, permutation)))

  x_parts = (x,)
  while len(x_parts) < num_parts:
    x_parts = sum(map(split_into_2, x_parts), ())
  return x_parts
def _compute_offsets_from_indices(
    block_info: BlockInfo, nd_indexer: NDIndexer
) -> ir.Value:
  """Computes per-element offsets (in elements) into the full array.

  Combines the indexer's per-dimension indices (slices, scalars, or integer
  arrays) with the block's start indices and row-major strides into a single
  offsets tensor of shape ``nd_indexer.get_indexer_shape()``.
  """
  full_shape = block_info.full_shape_dtype.shape
  num_squeezed_dims = sum(isinstance(b, pallas_core.Squeezed)
                          for b in block_info.block_shape)
  strides = pallas_utils.strides_from_shape(full_shape)
  indexer_shape = nd_indexer.get_indexer_shape()
  int_indexer_shape = nd_indexer.int_indexer_shape
  _check_tensor_size(indexer_shape)
  indices = nd_indexer.indices
  other_shape = indexer_shape[len(int_indexer_shape) :]
  other_shape_idx = 0
  assert len(indices) + num_squeezed_dims == len(full_shape)
  assert len(block_info.start_indices) == len(full_shape)

  array_dtype = jnp.dtype(block_info.full_shape_dtype.dtype)
  full_size = math.prod(full_shape) * array_dtype.itemsize
  # Use 64-bit indexing when offset might be >= 2**32 bytes.
  offset_eltype = ir.IntegerType.get_signless(64 if full_size > 2**32 else 32)
  if indexer_shape:
    offsets = _zeros(ir.RankedTensorType.get(indexer_shape, offset_eltype))
  else:
    offsets = _ir_constant(0, offset_eltype)

  indexer_iter = iter(indices)
  for dim_stride, dim_block_size, start_offset in zip(
      strides, block_info.block_shape, block_info.start_indices
  ):
    match dim_block_size:
      case pallas_core.Squeezed():
        # Squeezed dims have no indexer entry; they contribute index 0.
        index = _ir_constant(0, offset_eltype)
      case int():
        index = next(indexer_iter)
        if isinstance(index, slice):
          index = primitives.Slice.from_slice(index, dim_block_size)

    if isinstance(index, primitives.Slice):
      if index.is_dynamic_start or (index.stride != 1):
        # General slice: start + stride * iota.
        start = index.start
        if not index.is_dynamic_start:
          start = _ir_constant(start, offset_eltype)
        start = _ir_cast(start, offset_eltype, signed=False)
        iota = _ir_cast(_make_range(0, index.size), offset_eltype, signed=False)
        if index.stride != 1:
          iota = _mul(iota, _full(iota.type, index.stride))
        dim_offsets = _add(_bcast_to(start, [index.size]), iota)
      else:
        # Static unit-stride slice folds directly into the range.
        iota = _make_range(index.start, index.start + index.size)
        dim_offsets = _ir_cast(iota, offset_eltype, signed=False)

      # Slice dims occupy the trailing ("other") part of the indexer shape.
      other_shape_idx += 1
      for _ in other_shape[other_shape_idx:]:
        rank = ir.RankedTensorType(dim_offsets.type).rank
        dim_offsets = _expand_dims(dim_offsets, rank)
    else:
      # indexer is either a *scalar* or an array of size `int_indexer_shape`
      dim_offsets = index
      if not isinstance(dim_offsets, ir.Value):
        dim_offsets = _ir_constant(dim_offsets, offset_eltype)
      dim_offsets = _ir_cast(dim_offsets, offset_eltype, signed=False)
      if ir.RankedTensorType.isinstance(dim_offsets.type):
        for _ in other_shape:
          rank = ir.RankedTensorType(dim_offsets.type).rank
          dim_offsets = _expand_dims(dim_offsets, rank)

    if ir.RankedTensorType.isinstance(dim_offsets.type):
      # Left-pad with size-1 axes up to the indexer rank, then broadcast.
      rank = ir.RankedTensorType(dim_offsets.type).rank
      for _ in range(len(indexer_shape) - rank):
        dim_offsets = _expand_dims(dim_offsets, 0)
      dim_offsets = _bcast_to(dim_offsets, indexer_shape)

    if start_offset is not None:
      start_offset = _ir_cast(start_offset, offset_eltype, signed=False)
      dim_offsets = _add(dim_offsets, _bcast_to(start_offset, indexer_shape))

    dim_offsets = _mul(dim_offsets, _full(dim_offsets.type, dim_stride))
    offsets = _add(offsets, dim_offsets)
  return offsets
def _compute_pointers_from_indices(
    root_ptr: ir.Value, block_info: BlockInfo, nd_indexer: NDIndexer
) -> ir.Value:
  """Returns per-element pointers: ``root_ptr`` plus the indexer offsets."""
  indexer_shape = nd_indexer.get_indexer_shape()
  offsets = _compute_offsets_from_indices(block_info, nd_indexer)
  return _add(_bcast_to(root_ptr, indexer_shape), offsets)
@register_lowering(sp.get_p)
def _get_lowering_rule(ctx: LoweringRuleContext, ptr, *idx, tree):
  """Lowers a ref read by delegating to the masked-load rule."""
  indexers = tree_util.tree_unflatten(tree, idx)
  if not tt_dialect.PointerType.isinstance(ptr.type):
    # ``ptr`` is already a plain value; nothing to load.
    assert not indexers
    return ptr
  flat_args, flat_tree = tree_util.tree_flatten((ptr, indexers, None, None))
  return _masked_load_lowering_rule(
      ctx,
      *flat_args,
      args_tree=flat_tree,
      eviction_policy=None,
      cache_modifier=None,
      is_volatile=False,
  )
# Map user-facing string spellings (e.g. ".ca", "evict_last") to dialect enums.
_STR_TO_EVICTION_POLICY = {str(e): e for e in tt_dialect.EvictionPolicy}
_STR_TO_CACHE_MODIFIER = {str(c): c for c in tt_dialect.CacheModifier}
def _load(
    ptr: ir.Value,
    mask: ir.Value | None = None,
    other: ir.Value | None = None,
    *,
    cache_modifier: str | None = None,
    eviction_policy: str | None = None,
    is_volatile: bool = False,
) -> ir.Value:
  """Emits ``tt.load`` from ``ptr`` with optional mask/fallback value.

  ``other`` supplies the value for masked-off lanes and requires ``mask``.
  i1 pointees are loaded as i8 and truncated back to i1 afterwards.
  """
  if cache_modifier is None:
    cache_modifier = tt_dialect.CacheModifier.NONE
  elif cache_modifier == ".ca" or cache_modifier == ".cg":
    cache_modifier = _STR_TO_CACHE_MODIFIER[cache_modifier]
  else:
    raise ValueError(f"unsupported cache modifier: {cache_modifier}")
  if eviction_policy is None:
    eviction_policy = tt_dialect.EvictionPolicy.NORMAL
  else:
    try:
      eviction_policy = _STR_TO_EVICTION_POLICY[eviction_policy]
    except KeyError:
      raise ValueError(
          f"unsupported eviction policy: {eviction_policy}"
      ) from None
  if tt_dialect.PointerType.isinstance(ptr.type):
    ptr_type = tt_dialect.PointerType(ptr.type)
    if ir.RankedTensorType.isinstance(ptr_type.pointee_type):
      raise NotImplementedError("loading from a block pointer is not supported")
  ptr_type = _element_type(ptr.type)
  if not tt_dialect.PointerType.isinstance(ptr_type):
    raise ValueError(f"unsupported pointer type: {ptr_type}")
  ptr_type = tt_dialect.PointerType(ptr_type)
  if other is not None and mask is None:
    raise ValueError("other requires mask to be provided")
  if not ir.RankedTensorType.isinstance(ptr.type):
    # Scalar pointer: mask/other must also be scalars.
    if other is not None and ir.RankedTensorType.isinstance(other.type):
      raise ValueError("other cannot be a block if pointer is not a block")
    if mask is not None and ir.RankedTensorType.isinstance(mask.type):
      raise ValueError("mask cannot be a block if pointer is not a block")
  pointee_type = ptr_type.pointee_type
  is_int1 = isinstance(pointee_type, ir.IntegerType) and pointee_type.width == 1
  if is_int1:
    # Triton has no i1 memory accesses; go through i8.
    pointee_type = ir.IntegerType.get_signless(8)
    ptr = _ir_cast(
        ptr,
        tt_dialect.PointerType.get(pointee_type, ptr_type.address_space),
        signed=False,
    )
  if other is not None:
    other = _ir_cast(other, pointee_type, signed=False)
  result = tt_dialect.load(
      ptr,
      mask=mask,
      other=other,
      cache=cache_modifier,
      evict=eviction_policy,
      is_volatile=is_volatile,
  )
  return (
      result
      if not is_int1
      else _ir_cast(result, ir.IntegerType.get_signless(1), signed=False)
  )
def _is_contiguous_int4(block_info: BlockInfo, nd_indexer: NDIndexer) -> bool:
  """Returns True if the block is contiguous in the last dimension."""
  # In order to loaded as `uint8` the index must be an aligned slice.
  # Two int4 values share one byte, hence the even start/size requirements.
  return (
      block_info.full_shape_dtype.dtype in (jnp.int4, jnp.uint4)
      and block_info.start_indices_alignment
      and (block_info.start_indices_alignment[-1] % 2 == 0)
      and isinstance(slc := nd_indexer.indices[-1], indexing.Slice)
      and isinstance(slc.start, int)
      and isinstance(slc.size, int)
      and (slc.start % 2 == 0)
      and (slc.size % 2 == 0)
      and (slc.stride == 1)
  )
def _reinterpret_int4_as_uint8(
    block_info: BlockInfo, nd_indexer: NDIndexer
) -> tuple[BlockInfo, NDIndexer]:
  """Returns a new block info and indexer that reads `int4` as `uint8`."""
  # Halve the last-dimension slice: each uint8 byte packs two int4 values.
  last_idx = nd_indexer.indices[-1]
  new_last_idx = indexing.Slice(last_idx.start // 2, last_idx.size // 2)
  new_indices = (*nd_indexer.indices[:-1], new_last_idx)
  new_shape = (*nd_indexer.shape[:-1], nd_indexer.shape[-1] // 2)
  idx = dataclasses.replace(nd_indexer, indices=new_indices, shape=new_shape)
  # Halve the array's last dimension and its start index to match.
  full_shape = block_info.full_shape_dtype.shape
  new_full_shape = (*full_shape[:-1], full_shape[-1] // 2)
  start_idx = block_info.start_indices[-1]
  new_start_idx = _floordiv(start_idx, _full(start_idx.type, 2), signed=False)
  new_start_indices = (*block_info.start_indices[:-1], new_start_idx)
  block_info = dataclasses.replace(
      block_info,
      full_shape_dtype=jax.ShapeDtypeStruct(new_full_shape, jnp.uint8),
      start_indices=new_start_indices,
  )
  return block_info, idx
@register_lowering(primitives.load_p)
def _masked_load_lowering_rule(
    ctx: LoweringRuleContext,
    *args_flat,
    args_tree,
    eviction_policy,
    cache_modifier,
    is_volatile,
):
  """Lowers a (possibly masked) ref load, with special handling for int4."""
  block_info, *_ = ctx.block_infos
  assert block_info is not None
  ptr, indexers, mask, other = args_tree.unflatten(args_flat)
  *_, mask_aval, other_aval = args_tree.unflatten(ctx.avals_in)
  if len(indexers) > 1:
    raise NotImplementedError("No support for multiple indexers yet.")
  indexers = list(indexers)
  if not indexers:
    # No indexer given: load the whole (transformed) ref.
    ref_shape = state.get_transforms_shape(indexers, ctx.avals_in[0].shape)
    idx = NDIndexer.make_trivial_indexer(ref_shape)
  else:
    idx = indexers[0]
  if not tt_dialect.PointerType.isinstance(ptr.type):
    # The ref was already lowered to a plain value; nothing to load.
    assert len(ctx.avals_in) == 1
    return ptr
  is_int4 = block_info.full_shape_dtype.dtype in (jnp.int4, jnp.uint4)
  is_contiguous_int4 = _is_contiguous_int4(block_info, idx)
  if is_contiguous_int4:
    # If the load reads contiguously in the last dimension, we can reinterpret
    # the `int4` block as `uint8`. This generates much more efficient code. The
    # more generic `int4` code below has offsets like `0, 0, 1, 1, ...`, which
    # Triton doesn't optimize as well.
    block_info, idx = _reinterpret_int4_as_uint8(block_info, idx)
  offsets = _compute_offsets_from_indices(block_info, idx)
  ptr_offsets = offsets
  if is_int4 and not is_contiguous_int4:
    # Two int4 values per byte: halve the element offsets to byte offsets.
    ptr_offsets = _floordiv(offsets, _full(offsets.type, 2), signed=False)
  shape = idx.get_indexer_shape()
  ptr = _add(_bcast_to(ptr, shape), ptr_offsets)
  if mask is not None:
    mask = _bcast_to(_ensure_ir_value(mask, mask_aval), shape)
  if other is not None:
    other = _bcast_to(_ensure_ir_value(other, other_aval), shape)
  values = _load(
      ptr,
      mask=mask,
      other=other,
      cache_modifier=cache_modifier,
      is_volatile=is_volatile,
      eviction_policy=eviction_policy,
  )
  if not is_int4:
    return values
  if is_contiguous_int4:
    # Unpack each uint8 into its low and high nibbles, interleave them back
    # into the original last-dimension order, then truncate to i4.
    msb_values = arith_dialect.shrui(values, _full(values.type, 4))
    join_type = get_join_type(ir.RankedTensorType(values.type))
    values = tt_dialect.join(join_type, values, msb_values)
    shape = ir.RankedTensorType(values.type).shape
    values = _reshape(values, (*shape[:-2], shape[-2] * shape[-1]))
  else:
    # Odd element offsets live in the high nibble; shift those down by 4.
    offsets = _ir_cast(offsets, ir.IntegerType.get_signless(32), signed=False)
    in_msb = _mod(offsets, _full(offsets.type, 2), signed=False)
    shift = _mul(in_msb, _full(in_msb.type, 4))
    shift = _ir_cast(shift, values.type, signed=False)
    values = arith_dialect.shrui(values, shift)
  return _ir_cast(values, ir.IntegerType.get_signless(4), signed=False)
@register_lowering(sp.swap_p)
def _swap_lowering_rule(ctx: LoweringRuleContext, ptr, value, *idx, tree):
  """Lowers a ref swap by delegating to the masked-swap rule."""
  indexers = tree_util.tree_unflatten(tree, idx)
  if not tt_dialect.PointerType.isinstance(ptr.type):
    assert not indexers
    return ptr
  if len(indexers) > 1:
    raise NotImplementedError("No support for multiple indexers yet.")
  flat_args, flat_tree = tree_util.tree_flatten((ptr, indexers, value, None))
  return _masked_swap_lowering_rule(
      ctx, *flat_args, args_tree=flat_tree, eviction_policy=None
  )
def _store(
    ptr: ir.Value,
    value: ir.Value,
    mask: ir.Value | None = None,
    *,
    cache_modifier: str | None = None,
    eviction_policy: str | None = None,
) -> ir.Value:
  """Emits ``tt.store`` of ``value`` to ``ptr`` with an optional mask.

  i1 pointees are widened to i8 before storing. Note ``.ca`` is rejected:
  it is a load-only cache modifier.
  """
  if cache_modifier is None:
    cache_modifier = tt_dialect.CacheModifier.NONE
  elif cache_modifier != ".ca":
    cache_modifier = _STR_TO_CACHE_MODIFIER[cache_modifier]
  else:
    raise ValueError(f"unsupported cache modifier: {cache_modifier}")
  if eviction_policy is None:
    eviction_policy = tt_dialect.EvictionPolicy.NORMAL
  else:
    try:
      eviction_policy = _STR_TO_EVICTION_POLICY[eviction_policy]
    except KeyError:
      raise ValueError(
          f"unsupported eviction policy: {eviction_policy}"
      ) from None
  if tt_dialect.PointerType.isinstance(ptr.type):
    ptr_type = tt_dialect.PointerType(ptr.type)
    if ir.RankedTensorType.isinstance(ptr_type.pointee_type):
      raise NotImplementedError("loading from a block pointer is not supported")
  ptr_type = _element_type(ptr.type)
  if not tt_dialect.PointerType.isinstance(ptr_type):
    raise ValueError(f"unsupported pointer type: {ptr_type}")
  ptr_type = tt_dialect.PointerType(ptr_type)
  if not ir.RankedTensorType.isinstance(ptr.type):
    # Scalar pointer: value/mask must also be scalars.
    if ir.RankedTensorType.isinstance(value.type):
      raise ValueError("value cannot be a block if pointer is not a block")
    if mask is not None and ir.RankedTensorType.isinstance(mask.type):
      raise ValueError("mask cannot be a block if pointer is not a block")
  pointee_type = ptr_type.pointee_type
  if isinstance(pointee_type, ir.IntegerType) and pointee_type.width == 1:
    # Triton has no i1 memory accesses; store as i8.
    pointee_type = ir.IntegerType.get_signless(8)
    ptr = _ir_cast(
        ptr,
        tt_dialect.PointerType.get(pointee_type, ptr_type.address_space),
        signed=False,
    )
  value = _ir_cast(value, pointee_type, signed=False)
  return tt_dialect.store(
      ptr, value, mask=mask, cache=cache_modifier, evict=eviction_policy
  )
@register_lowering(primitives.swap_p)
def _masked_swap_lowering_rule(
    ctx: LoweringRuleContext, *args_flat, args_tree, eviction_policy
):
  """Lowers a (possibly masked) swap: loads the old value, stores the new."""
  block_info, *_ = ctx.block_infos
  assert block_info is not None
  ptr, indexers, value, mask = args_tree.unflatten(args_flat)
  *_, value_aval, mask_aval = args_tree.unflatten(ctx.avals_in)
  if len(indexers) > 1:
    raise NotImplementedError("No support for multiple indexers yet.")
  if not indexers:
    # No indexer given: swap the whole (transformed) ref.
    ref_shape = state.get_transforms_shape(indexers, ctx.avals_in[0].shape)
    idx = NDIndexer.make_trivial_indexer(ref_shape)
  else:
    idx = indexers[0]
  ptr = _compute_pointers_from_indices(ptr, block_info, idx)
  other = None
  if value is not None:
    value = _ensure_ir_value(value, value_aval)
  if mask is not None:
    mask = _bcast_to(_ensure_ir_value(mask, mask_aval), idx.get_indexer_shape())
    if value is not None:
      # Masked-off lanes of the returned "old value" read back ``value``.
      other = _bcast_to(value, idx.get_indexer_shape())
  old_value = _load(ptr, mask=mask, other=other)
  _store(ptr, value, mask=mask, eviction_policy=eviction_policy)
  return old_value
@register_lowering(sp.addupdate_p)
def _addupdate_lowering_rule(ctx: LoweringRuleContext, ptr, value, *idx, tree):
  """Lowers ``addupdate`` (ref[idx] += value) as an atomic RMW add."""
  block_info, *_ = ctx.block_infos
  assert block_info is not None
  indexers = tree_util.tree_unflatten(tree, idx)
  if not tt_dialect.PointerType.isinstance(ptr.type):
    assert len(indexers) == 0
    return ptr
  if len(indexers) > 1:
    raise NotImplementedError("No support for multiple indexers yet.")
  indexer = indexers[0]
  ptr = _compute_pointers_from_indices(ptr, block_info, indexer)
  # Float add by default; integer values use the integer RMW op.
  op = tt_dialect.RMWOp.FADD
  if isinstance(_element_type(value.type), ir.IntegerType):
    op = tt_dialect.RMWOp.ADD
  _atomic_rmw(op, ptr, value)
  return []
@register_lowering(lax.transpose_p)
def _transpose_lowering(ctx: LoweringRuleContext, x, *, permutation):
  """Lowers transpose to ``tt.trans`` with the given axis permutation."""
  del ctx  # Unused.
  return tt_dialect.trans(x, permutation)
# lax precisions that map to Triton's TF32 input precision in dot lowering.
_TF32_PRECISIONS = (lax.Precision.HIGH, lax.Precision.DEFAULT)
def _as_bf16(x):
  """Casts ``x`` to bfloat16."""
  return _ir_cast(x, _dtype_to_ir_type(jnp.bfloat16), signed=False)
def _as_f32(x):
  """Casts ``x`` to float32."""
  return _ir_cast(x, _dtype_to_ir_type(jnp.float32), signed=False)
@register_lowering(lax.dot_general_p)
def _dot_general_lowering(
ctx: LoweringRuleContext,
a,
b,
*,
dimension_numbers,
out_sharding,
precision,
preferred_element_type,
):
del preferred_element_type, out_sharding # Unused.
((a_contract_dim,), (b_contract_dim,)), batch_dims = dimension_numbers
assert batch_dims == ((), ())
if a_contract_dim == 0:
a = tt_dialect.trans(a, (1, 0))
if b_contract_dim == 1:
b = tt_dialect.trans(b, (1, 0))
a_aval, b_aval = ctx.avals_in
[out_aval] = ctx.avals_out
if precision is None or (precision == lax.DotAlgorithmPreset.DEFAULT):
precision = (lax.Precision.DEFAULT, lax.Precision.DEFAULT)
if isinstance(precision, lax.DotAlgorithmPreset):
match precision:
case lax.DotAlgorithmPreset.TF32_TF32_F32:
input_precision = tt_dialect.InputPrecision.TF32
case lax.DotAlgorithmPreset.TF32_TF32_F32_X3:
input_precision = tt_dialect.InputPrecision.TF32x3
case lax.DotAlgorithmPreset.F32_F32_F32:
input_precision = tt_dialect.InputPrecision.IEEE
case (
lax.DotAlgorithmPreset.F16_F16_F16
| lax.DotAlgorithmPreset.F16_F16_F32
| lax.DotAlgorithmPreset.BF16_BF16_BF16
| lax.DotAlgorithmPreset.BF16_BF16_F32
| lax.DotAlgorithmPreset.BF16_BF16_F32_X3
| lax.DotAlgorithmPreset.BF16_BF16_F32_X6
| lax.DotAlgorithmPreset.BF16_BF16_F32_X9
):
input_precision = None
case _:
raise NotImplementedError(f"Unsupported dot algorithm: {precision}.")
a = _cast(a, a_aval.dtype, precision.supported_lhs_types[0])
b = _cast(b, b_aval.dtype, precision.supported_rhs_types[0])
acc_dtype = precision.accumulation_type
elif isinstance(precision, tuple):
a_precision, b_precision = precision
if a_precision in _TF32_PRECISIONS or b_precision in _TF32_PRECISIONS:
input_precision = tt_dialect.InputPrecision.TF32
elif a_aval.dtype == jnp.float32:
input_precision = tt_dialect.InputPrecision.IEEE
else:
input_precision = None
acc_dtype = out_aval.dtype
if acc_dtype != jnp.int32 and acc_dtype != jnp.float16:
acc_dtype = jnp.float32
else:
raise NotImplementedError(f"Unsupported dot precision: {precision}.")
a_type = ir.RankedTensorType(a.type)
b_type = ir.RankedTensorType(b.type)
if len(a_type.shape) != len(b_type.shape) != 2:
raise ValueError("a and b must be 2D, but got:"
f" {a_type.shape} and {b_type.shape}")
if min(*b_type.shape) < 16:
raise ValueError("all dimensions of b must be >= 16 ")
if a_type.element_type != b_type.element_type:
raise ValueError(
"a and b must have the same element type, but got:"
f" {a_type.element_type} and {b_type.element_type}"
)
m, _ = a_type.shape
_, n = b_type.shape
acc = _zeros(ir.RankedTensorType.get([m, n], _dtype_to_ir_type(acc_dtype)))
if precision in (
lax.DotAlgorithmPreset.BF16_BF16_F32_X3,
lax.DotAlgorithmPreset.BF16_BF16_F32_X6,
lax.DotAlgorithmPreset.BF16_BF16_F32_X9,
):
a_bf16 = _as_bf16(a)
b_bf16 = _as_bf16(b)
a_err0 = _sub(a, _as_f32(a_bf16))
b_err0 = _sub(b, _as_f32(b_bf16))
a_err0_bf16 = _as_bf16(a_err0)
b_err0_bf16 = _as_bf16(b_err0)
a_err1_bf16 = _as_bf16(_sub(a_err0, _as_f32(a_err0_bf16)))
b_err1_bf16 = _as_bf16(_sub(b_err0, _as_f32(b_err0_bf16)))
# Accumulate the smallest values first to reduce the numeric error.
if precision == lax.DotAlgorithmPreset.BF16_BF16_F32_X9:
acc = tt_dialect.dot(a_err1_bf16, b_err0_bf16, acc)
acc = tt_dialect.dot(a_err1_bf16, b_err1_bf16, acc)
acc = tt_dialect.dot(a_err0_bf16, b_err1_bf16, acc)
if precision in (
lax.DotAlgorithmPreset.BF16_BF16_F32_X6,
lax.DotAlgorithmPreset.BF16_BF16_F32_X9,
):
acc = tt_dialect.dot(a_err1_bf16, b_bf16, acc)
acc = tt_dialect.dot(a_bf16, b_err1_bf16, acc)
acc = tt_dialect.dot(a_err0_bf16, b_err0_bf16, acc)
acc = tt_dialect.dot(a_err0_bf16, b_bf16, acc)
acc = tt_dialect.dot(a_bf16, b_err0_bf16, acc)
# If `a` rounding error is zero and `b` is `inf` then `acc` may contain
# `NaN`s (as `0 * inf = NaN`), and vice versa.
acc = arith_dialect.select(_is_nan(acc), _zeros_like(acc), acc)
a, b = a_bf16, b_bf16
acc = tt_dialect.dot(a, b, acc, input_precision=input_precision)
return _cast(acc, acc_dtype, out_aval.dtype)
def _reduction_lowering(body, ctx: LoweringRuleContext, a, axes):
  """Builds a ``tt.reduce`` over a single axis, tracing ``body`` as combiner.

  ``body`` is a binary JAX function; it is traced to a jaxpr and lowered into
  the reduce op's combiner region.
  """
  flat_args = tree_util.tree_leaves(a)
  (axis,) = axes
  # The combiner operates on scalars of each operand's dtype.
  mapped_avals = [jax_core.ShapedArray((), aval.dtype) for aval in ctx.avals_in]
  in_tree = tree_util.tree_structure((a, a))
  flat_fun, out_tree_thunk = api_util.flatten_fun_nokwargs(
      lu.wrap_init(
          body,
          debug_info=api_util.debug_info("pallas triton reduction",
                                         body, (a, a), {})),
      in_tree
  )
  combine_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
      flat_fun, [*mapped_avals, *mapped_avals]
  )
  out_tree = out_tree_thunk()
  del out_tree  # Not needed
  if consts:
    raise NotImplementedError("Reductions with constants not supported.")
  element_types = [_element_type(arg.type) for arg in flat_args]
  reduce_op = tt_dialect.ReduceOp(flat_args, axis)
  # The combiner block takes (lhs..., rhs...) scalar arguments.
  param_types = element_types * 2
  entry = reduce_op.regions[0].blocks.append(*param_types)
  with ir.InsertionPoint.at_block_begin(entry):
    results = lower_jaxpr_to_triton_ir(
        ctx.context, combine_jaxpr, None, *entry.arguments
    )
    tt_dialect.reduce_return(results)
  reduce_op.verify()
  return list(reduce_op.result)
def _reduce_lowering(body, ctx: LoweringRuleContext, a, *, axes, **kwargs):
  """Lowers a (possibly multi-axis) reduction, one axis at a time.

  Axes are peeled off largest-first so that the remaining axis indices stay
  valid after each intermediate reduction. The final axis is handled by
  `_reduction_lowering`.
  """
  assert isinstance(axes, tuple)
  if not axes:
    # Nothing to reduce over; the input is returned unchanged.
    return a
  while len(axes) > 1:
    axis = max(axes)
    # Avals with `axis` removed, describing the intermediate result.
    dst_avals = tuple(v.update(shape=v.shape[:axis] + v.shape[axis + 1:])
                      for v in ctx.avals_in)
    a = _reduce_lowering(
        body, ctx.replace(avals_out=dst_avals), a, axes=(axis,))
    # Adding an intervening -(-reduce(.)) introduces a convert_layout between
    # reduces, which seems necessary for correctness.
    # TODO(bjp): Get rid of the double negation.
    # https://github.com/openai/triton/issues/1776
    a = _minus(_minus(a))
    ctx = ctx.replace(avals_in=dst_avals)
    axes = tuple(ax for ax in axes if ax != axis)
  return _reduction_lowering(body, ctx, a, axes=axes)[0]
# Register the max/min/sum reductions; each binds the corresponding jnp
# binary op as the combine function of `_reduce_lowering`.
triton_lowering_rules[lax.reduce_max_p] = functools.partial(
    _reduce_lowering, jnp.maximum
)
triton_lowering_rules[lax.reduce_min_p] = functools.partial(
    _reduce_lowering, jnp.minimum
)
triton_lowering_rules[lax.reduce_sum_p] = functools.partial(
    _reduce_lowering, jnp.add
)
def _argreduce_lowering(
body, ctx: LoweringRuleContext, a, *, axes, index_dtype
):
if index_dtype != jnp.int32:
raise ValueError("`index_type` must be i32.")
if len(axes) != 1:
raise ValueError("`pallas` reduce operations only support one reduce axis.")
[axis] = axes
[a_aval] = ctx.avals_in
index = _make_range(0, a_aval.shape[axis])
if len(a_aval.shape) > 1:
# Broadcast index across the non-reduced axes
for i in range(len(a_aval.shape)):
if i != axis:
index = _expand_dims(index, i)
index = _bcast_to(index, a_aval.shape)
ctx = ctx.replace(avals_in=[a_aval, a_aval.update(dtype=jnp.dtype(jnp.int32))])
_, indices = _reduction_lowering(body, ctx, (a, index), axes=axes)
return indices
def _reduce_argmax_combine(left, right):
value1, index1 = left
value2, index2 = right
gt = value1 > value2
lt = value1 < value2
index_min = jnp.minimum(index1, index2)
index_ret = jnp.where(gt, index1, jnp.where(lt, index2, index_min))
value_ret = jnp.maximum(value1, value2)
return value_ret, index_ret
# argmax lowers to a (value, index) pair reduction using the argmax combiner.
triton_lowering_rules[lax.argmax_p] = functools.partial(
    _argreduce_lowering, _reduce_argmax_combine
)
def _reduce_argmin_combine(left, right):
value1, index1 = left
value2, index2 = right
gt = value1 > value2
lt = value1 < value2
index_min = jnp.minimum(index1, index2)
index_ret = jnp.where(lt, index1, jnp.where(gt, index2, index_min))
value_ret = jnp.minimum(value1, value2)
return value_ret, index_ret
# argmin lowers to a (value, index) pair reduction using the argmin combiner.
triton_lowering_rules[lax.argmin_p] = functools.partial(
    _argreduce_lowering, _reduce_argmin_combine
)
@register_lowering(pjit.jit_p)
def _pjit_lowering_rule(ctx: LoweringRuleContext, *args, jaxpr, **_):
  """Lowers a jit call by inlining its jaxpr into the current block."""
  if jaxpr.consts:
    raise NotImplementedError
  return lower_jaxpr_to_triton_ir(
      ctx.context, jaxpr.jaxpr, ctx.block_infos, *args
  )
@register_lowering(pjit.reshard_p)
def _reshard_lowering_rule(ctx, x, dst_sharding):
  """Reshard is a no-op at Triton lowering time: the value passes through."""
  return x
@register_lowering(jax_core.closed_call_p)
@register_lowering(custom_derivatives.custom_jvp_call_p)
def _closed_call_lowering_rule(
    ctx: LoweringRuleContext, *args, call_jaxpr, **_
):
  """Lowers closed calls (and custom_jvp calls) by inlining their jaxpr."""
  jaxpr, consts = call_jaxpr.jaxpr, call_jaxpr.consts
  if consts:
    raise NotImplementedError
  return lower_jaxpr_to_triton_ir(ctx.context, jaxpr, ctx.block_infos, *args)
@register_lowering(ad_checkpoint.remat_p)
def _remat_lowering_rule(ctx: LoweringRuleContext, *args, jaxpr, **_):
  """Lowers remat (checkpoint) by inlining its jaxpr unchanged."""
  return lower_jaxpr_to_triton_ir(ctx.context, jaxpr, ctx.block_infos, *args)
# stop_gradient lowers to the identity: the context is ignored and the value
# is returned unchanged.
triton_lowering_rules[ad_util.stop_gradient_p] = lambda _, x: x
@register_lowering(lax.axis_index_p)
def _axis_index_rule(ctx: LoweringRuleContext, *, axis_name: Hashable):
  """Lowers `axis_index` for named axes that map to Pallas grid dimensions.

  Raises:
    LookupError: if `axis_name` is not one of the grid's named axes.
  """
  grid_names = ctx.context.grid_mapping.grid_names
  if axis_name in grid_names:
    # We are querying a named axis corresponding to a grid dimension.
    return _program_id_lowering_rule(ctx, axis=grid_names.index(axis_name))
  raise LookupError(f"Axis name {axis_name} not found in grid.")
def _is_read_only(ref_effects) -> bool:
if len(ref_effects) == 0:
return True
if len(ref_effects) > 1:
# Means we must have a write or accum effect so not read-only
return False
(eff,) = ref_effects
return isinstance(eff, state.ReadEffect)
def _lower_jaxpr_to_for_loop(
    ctx: LoweringRuleContext,
    jaxpr: jax_core.Jaxpr,
    lower_bound,
    upper_bound,
    consts,
    *args,
    has_loop_index: bool,
    step: int = 1,
    bound_type: ir.IntegerType | None = None,
):
  """Lowers `jaxpr` as the body of an `scf.for` loop.

  Args:
    ctx: the lowering context.
    jaxpr: the loop body; its invars are `(*consts, [loop_index,] *args)`.
    lower_bound: IR value for the first loop index (inclusive).
    upper_bound: IR value for the last loop index (exclusive).
    consts: IR values closed over by the body.
    *args: IR values forming the loop carry.
    has_loop_index: whether the jaxpr takes the loop index as an argument.
    step: loop step; only 1 is supported.
    bound_type: integer type of the bounds; i32 is assumed when None.

  Returns:
    The IR results of the `scf.for` op (the final carry values).
  """
  if step != 1:
    raise NotImplementedError
  # The step constant's width must match the width of the loop bounds.
  if bound_type is None or bound_type.width == 32:
    step = _i32_constant(step)
  else:
    step = _i64_constant(step)
  for_op = scf_dialect.ForOp(lower_bound, upper_bound, step, args)
  with ir.InsertionPoint.at_block_begin(for_op.body):
    loop_index = for_op.induction_variable
    # Block argument 0 is the induction variable; the carry follows it.
    for_body_args = [for_op.body.arguments[i + 1] for i, _ in enumerate(args)]
    if has_loop_index:
      jaxpr_args = [*consts, loop_index, *for_body_args]
    else:
      jaxpr_args = [*consts, *for_body_args]
    all_out = lower_jaxpr_to_triton_ir(
        ctx.context, jaxpr, ctx.block_infos, *jaxpr_args
    )
    scf_dialect.yield_(all_out)
  return list(for_op.results_)
@register_lowering(lax.scan_p)
def _scan_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    jaxpr,
    linear,
    length,
    reverse,
    unroll,
    num_consts,
    num_carry,
    _split_transpose,
):
  """Lowers `lax.scan` to an `scf.for` loop.

  Only scans that pattern-match a `fori_loop` are supported: no extensive
  (per-iteration) inputs or outputs, forward iteration, no unrolling, and
  no closed-over jaxpr constants.
  """
  del _split_transpose
  # Only implements fori_loop-like scans
  num_extensive = len(args) - num_consts - num_carry
  if num_extensive: raise NotImplementedError
  if reverse: raise NotImplementedError
  if unroll != 1: raise NotImplementedError
  del linear, num_extensive, unroll, reverse
  jaxpr, jaxpr_consts = jaxpr.jaxpr, jaxpr.consts
  if jaxpr_consts: raise NotImplementedError
  del jaxpr_consts
  jaxpr, has_loop_index = (
      pallas_utils.pattern_match_scan_to_fori_loop(jaxpr, num_consts, num_carry)
  )
  args = map(_ensure_ir_value, args, ctx.avals_in)
  consts, args = util.split_list(args, [num_consts])
  if has_loop_index:
    # The first carry value is the loop index; derive the bounds from it.
    lower_bound, *args = args
    upper_bound = _add(lower_bound, _ir_constant(length, lower_bound.type))
    bound_type = lower_bound.type
  else:
    lower_bound = _i32_constant(0)
    upper_bound = _i32_constant(length)
    bound_type = ir.IntegerType.get_signless(32)
  for_out = _lower_jaxpr_to_for_loop(
      ctx, jaxpr, lower_bound, upper_bound, consts, *args,
      has_loop_index=has_loop_index, step=1, bound_type=bound_type)
  if has_loop_index:
    # Need to return the final loop index value if the outer scan expects
    # it as an output
    return [upper_bound, *for_out]
  return for_out
def _maybe_pattern_match_fori_loop(
    ctx: LoweringRuleContext,
    *args,
    cond_nconsts,
    cond_jaxpr,
    body_nconsts,
    body_jaxpr,
):
  """Tries to lower a `while_loop` that is really a `fori_loop` to `scf.for`.

  Returns None when the pattern does not match (the caller then falls back
  to a generic `scf.while` lowering). On success, returns the loop outputs
  with the final loop index and the upper bound prepended so the result
  matches the while loop's carry structure.
  """
  if cond_nconsts:
    return None
  _, cond_invars = split_list(cond_jaxpr.jaxpr.invars, [cond_nconsts])
  cond_in_avals = [v.aval for v in cond_invars]
  if len(cond_in_avals) < 2:
    return None
  # Check that the first two carry values are scalar ints
  a1, a2 = cond_in_avals[:2]
  if a1.shape != () or a1.dtype not in (jnp.int32, jnp.int64):
    return None
  if a2.shape != () or a2.dtype not in (jnp.int32, jnp.int64):
    return None
  # Check that the only eqn in the cond checks the loop index condition
  v1, v2 = cond_invars[:2]
  outvar = cond_jaxpr.jaxpr.outvars[0]
  assert outvar.aval.dtype == jnp.bool_
  if len(cond_jaxpr.jaxpr.eqns) != 1:
    return None
  eqn = cond_jaxpr.jaxpr.eqns[0]
  if eqn.primitive != lax.lt_p:
    return None
  if eqn.outvars != [outvar]:
    return None
  if eqn.invars != [v1, v2]:
    return None
  # Check that the carry is updated in the body appropriately
  _, body_invars = split_list(body_jaxpr.jaxpr.invars, [body_nconsts])
  v1, v2 = body_invars[:2]
  vo1, vo2 = body_jaxpr.jaxpr.outvars[:2]
  # Upper bound should be constant
  if v2 is not vo2:
    return None
  # Check that we increment the loop index in the body
  for i, eqn in enumerate(body_jaxpr.jaxpr.eqns):
    if eqn.primitive is lax.add_p:
      if eqn.invars[0] is v1:
        if isinstance(eqn.invars[1], jax_core.Literal):
          if eqn.invars[1].val == 1:
            if eqn.outvars[0] == vo1:
              eqn_index = i
              break
  else:
    # No `i + 1` equation found: not a fori_loop.
    return None
  jaxpr = body_jaxpr.jaxpr
  # Rebuild the body jaxpr as a fori body: drop the `i + 1` equation and the
  # upper-bound carry value from the invars/outvars.
  new_invars = (*jaxpr.invars[:body_nconsts],
                jaxpr.invars[body_nconsts],
                *jaxpr.invars[body_nconsts + 2:])
  new_outvars = tuple(jaxpr.outvars[2:])
  jaxpr = jaxpr.replace(
      eqns=jaxpr.eqns[:eqn_index] + jaxpr.eqns[eqn_index + 1:],
      invars=new_invars,
      outvars=new_outvars,
      debug_info=jaxpr.debug_info.with_unknown_names())
  _, body_consts, carry = split_list(args, [cond_nconsts, body_nconsts])
  (lb, ub), args = carry[:2], carry[2:]
  const_block_infos, args_block_infos = split_list(ctx.block_infos,
                                                   [body_nconsts])
  ctx = ctx.replace(block_infos=[*const_block_infos, None,
                                 *args_block_infos[2:]])
  for_out = _lower_jaxpr_to_for_loop(
      ctx,
      jaxpr,
      lb,
      ub,
      body_consts,
      *args,
      has_loop_index=True,
      step=1,
      bound_type=lb.type,
  )
  return [ub, ub, *for_out]
@register_lowering(lax.while_p)
def _while_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    cond_nconsts,
    cond_jaxpr,
    body_nconsts,
    body_jaxpr,
):
  """Lowers `lax.while` to `scf.while`, or to `scf.for` when the loop
  pattern-matches a `fori_loop`."""
  args = map(_ensure_ir_value, args, ctx.avals_in)
  # First, try to pattern match to fori_loop and lower to scf.for if possible
  # TODO(slebedev): Use `pallas_utils.pattern_match_while_to_fori_loop`.
  result = _maybe_pattern_match_fori_loop(ctx, *args, cond_nconsts=cond_nconsts,
                                          body_nconsts=body_nconsts, cond_jaxpr=cond_jaxpr,
                                          body_jaxpr=body_jaxpr)
  if result is not None:
    return result
  # Fall back to default while lowering
  cond_consts, body_consts, carry = util.split_list(
      args, [cond_nconsts, body_nconsts]
  )
  cond_const_block_infos, body_const_block_infos, carry_block_infos = (
      util.split_list(ctx.block_infos, [cond_nconsts, body_nconsts])
  )
  cond_const_types = [a.type for a in cond_consts]
  body_const_types = [a.type for a in body_consts]
  carry_types = [a.type for a in carry]
  # The scf.while op threads both jaxprs' constants alongside the carry.
  all_types = [*cond_const_types, *body_const_types, *carry_types]
  while_op = scf_dialect.WhileOp(all_types, args)
  before_block = while_op.before.blocks.append(*all_types)
  cond_consts_, _, carry_ = util.split_list(
      before_block.arguments,
      [cond_nconsts, body_nconsts],
  )
  cond_args = [*cond_consts_, *carry_]
  with ir.InsertionPoint.at_block_begin(before_block):
    # The "before" region computes the loop condition.
    [cond] = lower_jaxpr_to_triton_ir(
        ctx.context,
        cond_jaxpr.jaxpr,
        [*cond_const_block_infos, *carry_block_infos],
        *cond_args,
    )
    scf_dialect.condition(cond, before_block.arguments)
  after_block = while_op.after.blocks.append(*all_types)
  cond_consts_, body_consts_, carry_ = util.split_list(
      after_block.arguments,
      [cond_nconsts, body_nconsts],
  )
  all_args = [*cond_consts_, *body_consts_, *carry_]
  cond_const_args, body_const_args, carry_args = util.split_list(
      all_args, [cond_nconsts, body_nconsts]
  )
  with ir.InsertionPoint.at_block_begin(after_block):
    # The "after" region runs the loop body and yields the next carry,
    # passing the constants through unchanged.
    loop_out = lower_jaxpr_to_triton_ir(
        ctx.context,
        body_jaxpr.jaxpr,
        [*body_const_block_infos, *carry_block_infos],
        *body_const_args,
        *carry_args
    )
    all_handles = [*cond_const_args, *body_const_args, *loop_out]
    if all_handles:
      scf_dialect.yield_(all_handles)
  all_out = list(while_op.results_)
  # Drop the constants; only the carry values are the primitive's outputs.
  return all_out[cond_nconsts + body_nconsts :]
@register_lowering(lax.cond_p)
def _cond_lowering_rule(
    ctx: LoweringRuleContext,
    index,
    *args,  # *consts, *ops
    branches,  # tuple(jaxprs)
):
  """Lowers `lax.cond` to a (possibly nested) `scf.if`.

  Branch 0 is taken when `index == 0`; otherwise the rule recurses on the
  remaining branches with `index - 1` in the else-block.
  """
  block_infos = ctx.block_infos
  def to_type(out_aval):
    # Scalars lower to their element type; arrays to ranked tensors.
    element_type = _dtype_to_ir_type(out_aval.dtype)
    if not out_aval.shape:
      return element_type
    return ir.RankedTensorType.get(out_aval.shape, element_type)
  out_types = [to_type(out) for out in ctx.avals_out]
  use_branch0 = _equal(index, _ir_constant(0, index.type), signed=False)
  # TODO(bjp): Switch to scf.index_switch once exposed in triton.cc
  if_op = scf_dialect.IfOp(use_branch0, out_types, hasElse=True)
  with ir.InsertionPoint.at_block_begin(if_op.then_block):
    # block_infos[0] belongs to `index`, which the branch jaxprs do not see.
    outs0 = lower_jaxpr_to_triton_ir(
        ctx.context,
        branches[0].jaxpr,
        block_infos[1:],
        *args)
    scf_dialect.yield_(outs0)
  with ir.InsertionPoint.at_block_begin(if_op.else_block):
    # TODO(bjp): Instead of linear nest of 'if's, partition into halves.
    if len(branches) > 2:
      outs1 = _cond_lowering_rule(
          ctx,
          _sub(index, _ir_constant(1, index.type)),
          *args,
          branches=branches[1:],
      )
    else:
      outs1 = lower_jaxpr_to_triton_ir(
          ctx.context,
          branches[1].jaxpr,
          block_infos[1:],
          *args)
    scf_dialect.yield_(outs1)
  return list(if_op.results_)
def _ensure_ir_value(x: object, aval: jax_core.ShapedArray) -> ir.Value:
  """Passes IR values through; converts supported scalars/arrays to constants."""
  if isinstance(x, ir.Value):
    return x
  constant_types = (np.number, np.ndarray, int, float, literals.TypedNdArray)
  if not isinstance(x, constant_types):
    raise NotImplementedError
  return _ir_constant(x, _dtype_to_ir_type(aval.dtype))
def _ir_constant(v: object, t: ir.Type) -> ir.Value:
  """Builds an `arith.constant` of type `t` from a Python/NumPy scalar `v`."""
  supported = (np.number, np.ndarray, int, float, literals.TypedNdArray)
  if not isinstance(v, supported):
    raise NotImplementedError
  if isinstance(t, ir.IntegerType):
    scalar = int(v)
  else:
    # Only integer and float constant types are expected here.
    assert isinstance(t, ir.FloatType)
    scalar = float(v)
  return arith_dialect.constant(t, scalar)
def _i32_constant(v: int) -> ir.Value:
  """Builds a signless 32-bit integer IR constant."""
  return arith_dialect.constant(ir.IntegerType.get_signless(32), v)
def _i64_constant(v: int) -> ir.Value:
  """Builds a signless 64-bit integer IR constant."""
  return arith_dialect.constant(ir.IntegerType.get_signless(64), v)
def _dtype_to_ir_type(dtype: jax.typing.DTypeLike) -> ir.Type:
  """Maps a JAX dtype to the corresponding IR element type."""
  np_dtype = jnp.dtype(dtype)
  if not jnp.issubdtype(np_dtype, np.integer):
    return mlir.dtype_to_ir_type(np_dtype)
  # All integer types in Triton are signless.
  return ir.IntegerType.get_signless(np_dtype.itemsize * 8)
@register_lowering(lax.bitcast_convert_type_p)
def _bitcast_convert_type_lowering_rule(
    ctx: LoweringRuleContext, operand: ir.Value, *, new_dtype
) -> ir.Value:
  """Lowers `bitcast_convert_type` to `tt.bitcast` (same-width types only)."""
  # TODO(petebu) Handle case where src and dst types have different bitwidths
  src_elem_type = _element_type(operand.type)
  dst_elem_type = _element_type(_dtype_to_ir_type(new_dtype))
  assert isinstance(src_elem_type, (ir.IntegerType, ir.FloatType))
  assert isinstance(dst_elem_type, (ir.IntegerType, ir.FloatType))
  if src_elem_type.width != dst_elem_type.width:
    raise NotImplementedError(
        f"cannot cast {operand} to {new_dtype} because of different widths"
    )
  if ir.RankedTensorType.isinstance(operand.type):
    # Preserve the operand's shape; only the element type changes.
    shape = ir.RankedTensorType(operand.type).shape
    result_type = ir.RankedTensorType.get(shape, dst_elem_type)
  else:
    result_type = dst_elem_type
  return tt_dialect.bitcast(result_type, operand)
| _Fallback |
python | altair-viz__altair | altair/utils/schemapi.py | {
"start": 37831,
"end": 38331
} | class ____(SchemaLike[Literal["object"]], Protocol):
"""
Represents the wrapped state of a conditional encoding or property.
Attributes
----------
condition
One or more (predicate, statement) pairs which each form a condition.
Notes
-----
- Can be extended with additional conditions.
- *Does not* define a default value, but can be finalized with one.
"""
condition: Any
_schema: _TypeMap[Literal["object"]] = {"type": "object"}
| ConditionLike |
python | doocs__leetcode | solution/2100-2199/2166.Design Bitset/Solution.py | {
"start": 0,
"end": 1009
} | class ____:
def __init__(self, size: int):
self.a = ['0'] * size
self.b = ['1'] * size
self.cnt = 0
def fix(self, idx: int) -> None:
if self.a[idx] == '0':
self.a[idx] = '1'
self.cnt += 1
self.b[idx] = '0'
def unfix(self, idx: int) -> None:
if self.a[idx] == '1':
self.a[idx] = '0'
self.cnt -= 1
self.b[idx] = '1'
def flip(self) -> None:
self.a, self.b = self.b, self.a
self.cnt = len(self.a) - self.cnt
def all(self) -> bool:
return self.cnt == len(self.a)
def one(self) -> bool:
return self.cnt > 0
def count(self) -> int:
return self.cnt
def toString(self) -> str:
return ''.join(self.a)
# Your Bitset object will be instantiated and called as such:
# obj = Bitset(size)
# obj.fix(idx)
# obj.unfix(idx)
# obj.flip()
# param_4 = obj.all()
# param_5 = obj.one()
# param_6 = obj.count()
# param_7 = obj.toString()
| Bitset |
python | django__django | django/template/loaders/base.py | {
"start": 61,
"end": 1636
} | class ____:
def __init__(self, engine):
self.engine = engine
def get_template(self, template_name, skip=None):
"""
Call self.get_template_sources() and return a Template object for
the first template matching template_name. If skip is provided, ignore
template origins in skip. This is used to avoid recursion during
template extending.
"""
tried = []
for origin in self.get_template_sources(template_name):
if skip is not None and origin in skip:
tried.append((origin, "Skipped to avoid recursion"))
continue
try:
contents = self.get_contents(origin)
except TemplateDoesNotExist:
tried.append((origin, "Source does not exist"))
continue
else:
return Template(
contents,
origin,
origin.template_name,
self.engine,
)
raise TemplateDoesNotExist(template_name, tried=tried)
def get_template_sources(self, template_name):
"""
An iterator that yields possible matching template paths for a
template name.
"""
raise NotImplementedError(
"subclasses of Loader must provide a get_template_sources() method"
)
def reset(self):
"""
Reset any state maintained by the loader instance (e.g. cached
templates or cached loader modules).
"""
pass
| Loader |
python | huggingface__transformers | tests/pipelines/test_pipelines_automatic_speech_recognition.py | {
"start": 1754,
"end": 83306
} | class ____(unittest.TestCase):
model_mapping = dict(
(list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else [])
+ (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else [])
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
if tokenizer is None:
# Side effect of no Fast Tokenizer class for these model, so skipping
# But the slow tokenizer test should still run as they're quite small
self.skipTest(reason="No tokenizer available")
if model.can_generate():
extra_kwargs = {"max_new_tokens": 20}
else:
extra_kwargs = {}
speech_recognizer = AutomaticSpeechRecognitionPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
**extra_kwargs,
)
# test with a raw waveform
audio = np.zeros((34000,))
audio2 = np.zeros((14000,))
return speech_recognizer, [audio, audio2]
def run_pipeline_test(self, speech_recognizer, examples):
audio = np.zeros((34000,))
outputs = speech_recognizer(audio)
self.assertEqual(outputs, {"text": ANY(str)})
compare_pipeline_output_to_hub_spec(outputs, AutomaticSpeechRecognitionOutput)
# Striding
audio = {"raw": audio, "stride": (0, 4000), "sampling_rate": speech_recognizer.feature_extractor.sampling_rate}
if speech_recognizer.type == "ctc":
outputs = speech_recognizer(audio)
self.assertEqual(outputs, {"text": ANY(str)})
elif "Whisper" in speech_recognizer.model.__class__.__name__:
outputs = speech_recognizer(audio)
self.assertEqual(outputs, {"text": ANY(str)})
else:
# Non CTC models cannot use striding.
with self.assertRaises(ValueError):
outputs = speech_recognizer(audio)
# Timestamps
audio = np.zeros((34000,))
if speech_recognizer.type == "ctc":
outputs = speech_recognizer(audio, return_timestamps="char")
self.assertIsInstance(outputs["chunks"], list)
n = len(outputs["chunks"])
self.assertEqual(
outputs,
{
"text": ANY(str),
"chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)],
},
)
outputs = speech_recognizer(audio, return_timestamps="word")
self.assertIsInstance(outputs["chunks"], list)
n = len(outputs["chunks"])
self.assertEqual(
outputs,
{
"text": ANY(str),
"chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)],
},
)
elif "Whisper" in speech_recognizer.model.__class__.__name__:
outputs = speech_recognizer(audio, return_timestamps=True)
self.assertIsInstance(outputs["chunks"], list)
nb_chunks = len(outputs["chunks"])
self.assertGreater(nb_chunks, 0)
self.assertEqual(
outputs,
{
"text": ANY(str),
"chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(nb_chunks)],
},
)
else:
# Non CTC models cannot use return_timestamps
with self.assertRaisesRegex(
ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$"
):
outputs = speech_recognizer(audio, return_timestamps="char")
@require_torch
def test_pt_defaults(self):
pipeline("automatic-speech-recognition")
@require_torch
def test_small_model_pt(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/s2t-small-mustc-en-fr-st",
tokenizer="facebook/s2t-small-mustc-en-fr-st",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": "(Applaudissements)"})
output = speech_recognizer(waveform, chunk_length_s=10)
self.assertEqual(output, {"text": "(Applaudissements)"})
# Non CTC models cannot use return_timestamps
with self.assertRaisesRegex(
ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$"
):
_ = speech_recognizer(waveform, return_timestamps="char")
@require_torch
def test_small_model_pt_fp16(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/s2t-small-mustc-en-fr-st",
tokenizer="facebook/s2t-small-mustc-en-fr-st",
dtype=torch.float16,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": "(Applaudissements)"})
output = speech_recognizer(waveform, chunk_length_s=10)
self.assertEqual(output, {"text": "(Applaudissements)"})
# Non CTC models cannot use return_timestamps
with self.assertRaisesRegex(
ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$"
):
_ = speech_recognizer(waveform, return_timestamps="char")
@require_torch
def test_small_model_pt_bf16(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/s2t-small-mustc-en-fr-st",
tokenizer="facebook/s2t-small-mustc-en-fr-st",
dtype=torch.bfloat16,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": "(Applaudissements)"})
output = speech_recognizer(waveform, chunk_length_s=10)
self.assertEqual(output, {"text": "(Applaudissements)"})
# Non CTC models cannot use return_timestamps
with self.assertRaisesRegex(
ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$"
):
_ = speech_recognizer(waveform, return_timestamps="char")
@require_torch_accelerator
def test_whisper_fp16(self):
speech_recognizer = pipeline(
model="openai/whisper-tiny",
device=torch_device,
dtype=torch.float16,
max_new_tokens=5,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
speech_recognizer(waveform)
@require_torch
def test_small_model_pt_seq2seq(self):
speech_recognizer = pipeline(
model="hf-internal-testing/tiny-random-speech-encoder-decoder",
max_new_tokens=19,
num_beams=1,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": "あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u"})
@require_torch
def test_small_model_pt_seq2seq_gen_kwargs(self):
speech_recognizer = pipeline(
model="hf-internal-testing/tiny-random-speech-encoder-decoder",
max_new_tokens=10,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform, generate_kwargs={"num_beams": 2})
self.assertEqual(output, {"text": "あл † γ ت ב オ 束 泣 足"})
@slow
@require_torch
@require_pyctcdecode
def test_large_model_pt_with_lm(self):
filename = hf_hub_download("Narsil/asr_dummy", filename="4.flac", repo_type="dataset")
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm",
)
self.assertEqual(speech_recognizer.type, "ctc_with_lm")
output = speech_recognizer(filename)
self.assertEqual(
output,
{"text": "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumaje"},
)
# Override back to pure CTC
speech_recognizer.type = "ctc"
output = speech_recognizer(filename)
# plumajre != plumaje
self.assertEqual(
output,
{
"text": (
"y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajre"
)
},
)
speech_recognizer.type = "ctc_with_lm"
# Simple test with CTC with LM, chunking + timestamps
output = speech_recognizer(filename, chunk_length_s=2.0, return_timestamps="word")
self.assertEqual(
output,
{
"text": (
"y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajcri"
),
"chunks": [
{"text": "y", "timestamp": (0.52, 0.54)},
{"text": "en", "timestamp": (0.6, 0.68)},
{"text": "las", "timestamp": (0.74, 0.84)},
{"text": "ramas", "timestamp": (0.94, 1.24)},
{"text": "medio", "timestamp": (1.32, 1.52)},
{"text": "sumergidas", "timestamp": (1.56, 2.22)},
{"text": "revoloteaban", "timestamp": (2.36, 3.0)},
{"text": "algunos", "timestamp": (3.06, 3.38)},
{"text": "pájaros", "timestamp": (3.46, 3.86)},
{"text": "de", "timestamp": (3.92, 4.0)},
{"text": "quimérico", "timestamp": (4.08, 4.6)},
{"text": "y", "timestamp": (4.66, 4.68)},
{"text": "legendario", "timestamp": (4.74, 5.26)},
{"text": "plumajcri", "timestamp": (5.34, 5.74)},
],
},
)
# CTC + LM models cannot use return_timestamps="char"
with self.assertRaisesRegex(
ValueError, "^CTC with LM can only predict word level timestamps, set `return_timestamps='word'`$"
):
_ = speech_recognizer(filename, return_timestamps="char")
@require_torch
@unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
def test_torch_small_no_tokenizer_files(self):
# test that model without tokenizer file cannot be loaded
with pytest.raises(OSError):
pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/tiny-wav2vec2-no-tokenizer",
)
@require_torch
@slow
def test_torch_large(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-base-960h",
tokenizer="facebook/wav2vec2-base-960h",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": ""})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
@require_torch
@slow
def test_torch_large_with_input_features(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-audio/wav2vec2-bert-CV16-en",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": ""})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": "a man said to the universe sir i exist"})
@slow
@require_torch
@unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
def test_return_timestamps_in_preprocess(self):
pipe = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
chunk_length_s=8,
stride_length_s=1,
)
data = load_dataset("openslr/librispeech_asr", "clean", split="test", streaming=True)
sample = next(iter(data))
res = pipe(sample["audio"]["array"])
self.assertEqual(res, {"text": " Conquered returned to its place amidst the tents."})
res = pipe(sample["audio"]["array"], return_timestamps=True)
self.assertEqual(
res,
{
"text": " Conquered returned to its place amidst the tents.",
"chunks": [{"timestamp": (0.0, 3.36), "text": " Conquered returned to its place amidst the tents."}],
},
)
res = pipe(sample["audio"]["array"], return_timestamps="word")
# fmt: off
self.assertEqual(
res,
{
'text': ' Conquered returned to its place amidst the tents.',
'chunks': [
{'text': ' Conquered', 'timestamp': (0.5, 1.2)},
{'text': ' returned', 'timestamp': (1.2, 1.64)},
{'text': ' to', 'timestamp': (1.64, 1.84)},
{'text': ' its', 'timestamp': (1.84, 2.02)},
{'text': ' place', 'timestamp': (2.02, 2.28)},
{'text': ' amidst', 'timestamp': (2.28, 2.8)},
{'text': ' the', 'timestamp': (2.8, 2.98)},
{'text': ' tents.', 'timestamp': (2.98, 3.48)},
],
},
)
# fmt: on
@slow
@require_torch
@unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
def test_return_timestamps_and_language_in_preprocess(self):
pipe = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
chunk_length_s=8,
stride_length_s=1,
return_language=True,
)
data = load_dataset("openslr/librispeech_asr", "clean", split="test", streaming=True)
sample = next(iter(data))
res = pipe(sample["audio"]["array"])
self.assertEqual(
res,
{
"text": " Conquered returned to its place amidst the tents.",
"chunks": [{"language": "english", "text": " Conquered returned to its place amidst the tents."}],
},
)
res = pipe(sample["audio"]["array"], return_timestamps=True)
self.assertEqual(
res,
{
"text": " Conquered returned to its place amidst the tents.",
"chunks": [
{
"timestamp": (0.0, 3.36),
"language": "english",
"text": " Conquered returned to its place amidst the tents.",
}
],
},
)
res = pipe(sample["audio"]["array"], return_timestamps="word")
# fmt: off
self.assertEqual(
res,
{
'text': ' Conquered returned to its place amidst the tents.',
'chunks': [
{"language": "english",'text': ' Conquered', 'timestamp': (0.5, 1.2)},
{"language": "english", 'text': ' returned', 'timestamp': (1.2, 1.64)},
{"language": "english",'text': ' to', 'timestamp': (1.64, 1.84)},
{"language": "english",'text': ' its', 'timestamp': (1.84, 2.02)},
{"language": "english",'text': ' place', 'timestamp': (2.02, 2.28)},
{"language": "english",'text': ' amidst', 'timestamp': (2.28, 2.8)},
{"language": "english",'text': ' the', 'timestamp': (2.8, 2.98)},
{"language": "english",'text': ' tents.', 'timestamp': (2.98, 3.48)},
],
},
)
# fmt: on
@slow
@require_torch
@unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
def test_return_timestamps_in_preprocess_longform(self):
pipe = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny.en",
)
data = load_dataset("openslr/librispeech_asr", "clean", split="test", streaming=True)
samples = [next(iter(data)) for _ in range(8)]
audio = np.concatenate([sample["audio"]["array"] for sample in samples])
res = pipe(audio)
expected_output = {
"text": " Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst "
"the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst "
"the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst "
"the tents. Concord returned to its place amidst the tents."
}
self.assertEqual(res, expected_output)
res = pipe(audio, return_timestamps=True)
self.assertEqual(
res,
{
"text": " Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents.",
"chunks": [
{"timestamp": (0.0, 3.22), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (3.22, 6.74), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (6.74, 10.26), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (10.26, 13.78), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (13.78, 17.3), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (17.3, 20.82), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (20.82, 24.34), "text": " Concord returned to its place amidst the tents."},
{"timestamp": (24.34, 27.86), "text": " Concord returned to its place amidst the tents."},
],
},
)
pipe.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]]
res = pipe(audio, return_timestamps="word")
# fmt: off
self.assertEqual(
res["chunks"][:15],
[
{"text": " Concord", "timestamp": (0.5, 0.94)},
{"text": " returned", "timestamp": (0.94, 1.52)},
{"text": " to", "timestamp": (1.52, 1.78)},
{"text": " its", "timestamp": (1.78, 1.98)},
{"text": " place", "timestamp": (1.98, 2.16)},
{"text": " amidst", "timestamp": (2.16, 2.5)},
{"text": " the", "timestamp": (2.5, 2.9)},
{"text": " tents.", "timestamp": (2.9, 4.2)},
{"text": " Concord", "timestamp": (4.2, 4.5)},
{"text": " returned", "timestamp": (4.5, 5.0)},
{"text": " to", "timestamp": (5.0, 5.28)},
{"text": " its", "timestamp": (5.28, 5.48)},
{"text": " place", "timestamp": (5.48, 5.7)},
{"text": " amidst", "timestamp": (5.7, 6.02)},
{"text": " the", "timestamp": (6.02, 6.4)}
],
)
# fmt: on
@require_torch
def test_return_timestamps_in_init(self):
# segment-level timestamps are accepted
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
tokenizer = AutoTokenizer.from_pretrained("openai/whisper-tiny")
feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny")
dummy_speech = np.ones(100)
pipe = pipeline(
task="automatic-speech-recognition",
model=model,
feature_extractor=feature_extractor,
tokenizer=tokenizer,
chunk_length_s=8,
stride_length_s=1,
return_timestamps=True,
max_new_tokens=1,
)
_ = pipe(dummy_speech)
# word-level timestamps are accepted
pipe = pipeline(
task="automatic-speech-recognition",
model=model,
feature_extractor=feature_extractor,
tokenizer=tokenizer,
chunk_length_s=8,
stride_length_s=1,
return_timestamps="word",
max_new_tokens=1,
)
_ = pipe(dummy_speech)
# char-level timestamps are not accepted
with self.assertRaisesRegex(
ValueError,
"^Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
"Use `return_timestamps='word'` or `return_timestamps=True` respectively.$",
):
pipe = pipeline(
task="automatic-speech-recognition",
model=model,
feature_extractor=feature_extractor,
tokenizer=tokenizer,
chunk_length_s=8,
stride_length_s=1,
return_timestamps="char",
max_new_tokens=1,
)
_ = pipe(dummy_speech)
@require_torch
@slow
def test_torch_whisper(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
num_beams=1,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."})
output = speech_recognizer([ds[40]["audio"]], chunk_length_s=5, batch_size=4)
self.assertEqual(output, [{"text": " A man said to the universe, Sir, I exist."}])
@require_torch
@slow
def test_torch_whisper_batched(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
num_beams=1,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:2]")
EXPECTED_OUTPUT = [
{"text": " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."},
{"text": " Nor is Mr. Quilters' manner less interesting than his matter."},
]
audio_arrays = [x.get_all_samples().data for x in ds["audio"]]
output = speech_recognizer(audio_arrays, batch_size=2)
self.assertEqual(output, EXPECTED_OUTPUT)
    @slow
    @require_torch
    @unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
    def test_whisper_timestamp_prediction(self):
        """Segment-level timestamps with whisper-small: short-form, chunked, and long-form decoding."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
        # Concatenate four samples so the audio exceeds a single 30s window.
        array = np.concatenate(
            [ds[40]["audio"]["array"], ds[41]["audio"]["array"], ds[42]["audio"]["array"], ds[43]["audio"]["array"]]
        )
        pipe = pipeline(
            model="openai/whisper-small",
            return_timestamps=True,
        )

        # Short single sample: one chunk spanning the whole utterance.
        output = pipe(ds[40]["audio"])
        self.assertDictEqual(
            output,
            {
                "text": " A man said to the universe, Sir, I exist.",
                "chunks": [{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 4.26)}],
            },
        )

        # Chunked decoding (10s windows); nested_simplify normalizes the output for comparison.
        output = pipe(array, chunk_length_s=10)
        self.assertDictEqual(
            nested_simplify(output),
            {
                "chunks": [
                    {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)},
                    {
                        "text": (
                            " Sweat covered Brion's body, trickling into the "
                            "tight-loan cloth that was the only garment he wore, the "
                            "cut"
                        ),
                        "timestamp": (5.5, 11.95),
                    },
                    {
                        "text": (
                            " on his chest still dripping blood, the ache of his "
                            "overstrained eyes, even the soaring arena around him "
                            "with"
                        ),
                        "timestamp": (11.95, 19.61),
                    },
                    {
                        "text": " the thousands of spectators, retrievality is not worth thinking about.",
                        "timestamp": (19.61, 25.0),
                    },
                    {
                        "text": " His instant panic was followed by a small, sharp blow high on his chest.",
                        "timestamp": (25.0, 29.4),
                    },
                ],
                "text": (
                    " A man said to the universe, Sir, I exist. Sweat covered Brion's "
                    "body, trickling into the tight-loan cloth that was the only garment "
                    "he wore, the cut on his chest still dripping blood, the ache of his "
                    "overstrained eyes, even the soaring arena around him with the "
                    "thousands of spectators, retrievality is not worth thinking about. "
                    "His instant panic was followed by a small, sharp blow high on his "
                    "chest."
                ),
            },
        )

        # Long-form decoding of the same concatenated audio, no chunking — note
        # the segment boundaries differ from the chunked run above.
        output = pipe(array)
        self.assertDictEqual(
            output,
            {
                "chunks": [
                    {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)},
                    {
                        "text": (
                            " Sweat covered Brion's body, trickling into the "
                            "tight-loan cloth that was the only garment"
                        ),
                        "timestamp": (5.5, 10.18),
                    },
                    {"text": " he wore.", "timestamp": (10.18, 11.68)},
                    {"text": " The cut on his chest still dripping blood.", "timestamp": (11.68, 14.92)},
                    {"text": " The ache of his overstrained eyes.", "timestamp": (14.92, 17.6)},
                    {
                        "text": (
                            " Even the soaring arena around him with the thousands of spectators were trivialities"
                        ),
                        "timestamp": (17.6, 22.56),
                    },
                    {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
                ],
                "text": (
                    " A man said to the universe, Sir, I exist. Sweat covered Brion's "
                    "body, trickling into the tight-loan cloth that was the only garment "
                    "he wore. The cut on his chest still dripping blood. The ache of his "
                    "overstrained eyes. Even the soaring arena around him with the "
                    "thousands of spectators were trivialities not worth thinking about."
                ),
            },
        )
    @slow
    @require_torch
    def test_whisper_large_timestamp_prediction(self):
        """Segment-level timestamps with whisper-large-v3: short-form, chunked, and long-form decoding."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
        # Concatenate four samples so the audio exceeds a single 30s window.
        array = np.concatenate(
            [ds[40]["audio"]["array"], ds[41]["audio"]["array"], ds[42]["audio"]["array"], ds[43]["audio"]["array"]]
        )
        pipe = pipeline(model="openai/whisper-large-v3", return_timestamps=True, num_beams=1)

        # Short single sample: one chunk spanning the whole utterance.
        output = pipe(ds[40]["audio"])
        self.assertDictEqual(
            output,
            {
                "text": " A man said to the universe, Sir, I exist.",
                "chunks": [{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 4.08)}],
            },
        )

        # Chunked decoding (10s windows); nested_simplify normalizes the output for comparison.
        output = pipe(array, chunk_length_s=10)
        self.assertDictEqual(
            nested_simplify(output),
            {
                "chunks": [
                    {"timestamp": (0.0, 2.0), "text": (" A man said to the universe,")},
                    {"timestamp": (2.0, 4.1), "text": (" Sir, I exist.")},
                    {"timestamp": (5.14, 5.96), "text": (" Sweat covered")},
                    {"timestamp": (5.96, 8.02), "text": (" Breon's body, trickling into")},
                    {"timestamp": (8.02, 10.67), "text": (" the tight loincloth that was the only garment he wore,")},
                    {"timestamp": (10.67, 13.67), "text": (" the cut on his chest still dripping blood,")},
                    {"timestamp": (13.67, 17.61), "text": (" the ache of his overstrained eyes.")},
                    {
                        "timestamp": (17.61, 24.0),
                        "text": (
                            " Even the soaring arena around him with thousands of spectators were trivialities not worth thinking about."
                        ),
                    },
                    {
                        "timestamp": (24.0, 29.94),
                        "text": (" His instant of panic was followed by a small, sharp blow high on his chest."),
                    },
                ],
                "text": (
                    " A man said to the universe, Sir, I exist. Sweat covered Breon's"
                    " body, trickling into the tight loincloth that was the only garment"
                    " he wore, the cut on his chest still dripping blood, the ache of his"
                    " overstrained eyes. Even the soaring arena around him with thousands"
                    " of spectators were trivialities not worth thinking about. His "
                    "instant of panic was followed by a small, sharp blow high on his chest."
                ),
            },
        )

        # Long-form decoding of the same audio without chunking — segment
        # boundaries differ from the chunked run above.
        output = pipe(array)
        self.assertDictEqual(
            output,
            {
                "chunks": [
                    {"timestamp": (0.0, 1.96), "text": " A man said to the universe,"},
                    {"timestamp": (2.7, 4.1), "text": " Sir, I exist."},
                    {"timestamp": (5.14, 6.84), "text": " Sweat covered Brion's body,"},
                    {
                        "timestamp": (7.4, 10.68),
                        "text": " trickling into the tight loincloth that was the only garment he wore,",
                    },
                    {"timestamp": (11.6, 13.94), "text": " the cut on his chest still dripping blood,"},
                    {"timestamp": (14.78, 16.72), "text": " the ache of his overstrained eyes,"},
                    {
                        "timestamp": (17.32, 21.16),
                        "text": " even the soaring arena around him with the thousands of spectators",
                    },
                    {"timestamp": (21.16, 23.94), "text": " were trivialities not worth thinking about."},
                    {
                        "timestamp": (24.42, 29.94),
                        "text": " His instant panic was followed by a small sharp blow high on his chest.",
                    },
                ],
                "text": (
                    " A man said to the universe, Sir, I exist. Sweat covered Brion's body,"
                    " trickling into the tight loincloth that was the only garment he wore, "
                    "the cut on his chest still dripping blood, the ache of his overstrained "
                    "eyes, even the soaring arena around him with the thousands of spectators "
                    "were trivialities not worth thinking about. His instant panic was followed "
                    "by a small sharp blow high on his chest."
                ),
            },
        )
    @slow
    @require_torch
    @unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
    def test_whisper_word_timestamps_batched(self):
        """Word-level timestamps with whisper-tiny under 3s chunking, at batch sizes 1 and 2."""
        pipe = pipeline(
            task="automatic-speech-recognition",
            model="openai/whisper-tiny",
            chunk_length_s=3,
            return_timestamps="word",
        )
        data = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        sample = data[0]["audio"]
        # not the same output as test_simple_whisper_asr because of chunking
        EXPECTED_OUTPUT = {
            "text": " Mr. Quilder is the apostle of the middle classes and we are glad to welcome his gospel.",
            "chunks": [
                {"text": " Mr.", "timestamp": (0.48, 0.96)},
                {"text": " Quilder", "timestamp": (0.96, 1.24)},
                {"text": " is", "timestamp": (1.24, 1.5)},
                {"text": " the", "timestamp": (1.5, 1.72)},
                {"text": " apostle", "timestamp": (1.72, 1.98)},
                {"text": " of", "timestamp": (1.98, 2.32)},
                {"text": " the", "timestamp": (2.32, 2.5)},
                {"text": " middle", "timestamp": (2.5, 2.68)},
                {"text": " classes", "timestamp": (2.68, 3.2)},
                {"text": " and", "timestamp": (3.2, 3.56)},
                {"text": " we", "timestamp": (3.56, 3.68)},
                {"text": " are", "timestamp": (3.68, 3.8)},
                {"text": " glad", "timestamp": (3.8, 4.1)},
                {"text": " to", "timestamp": (4.1, 4.34)},
                {"text": " welcome", "timestamp": (4.3, 4.6)},
                {"text": " his", "timestamp": (4.6, 4.94)},
                {"text": " gospel.", "timestamp": (4.94, 5.82)},
            ],
        }
        # batch size 1: copy the audio sample since pipeline consumes it
        output = pipe(sample.copy(), batch_size=1)
        self.assertDictEqual(output, EXPECTED_OUTPUT)
        # batch size 2: input audio is chunked into smaller pieces so it's testing batching
        output = pipe(sample, batch_size=2)
        self.assertDictEqual(output, EXPECTED_OUTPUT)
    @slow
    @require_torch
    @unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
    def test_whisper_large_word_timestamps_batched(self):
        """Word-level timestamps with whisper-large-v3 at batch sizes 1 and 2."""
        pipe = pipeline(
            task="automatic-speech-recognition",
            model="openai/whisper-large-v3",
            return_timestamps="word",
        )
        data = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        sample = data[0]["audio"]
        # not the same output as test_simple_whisper_asr because of chunking
        EXPECTED_OUTPUT = {
            "text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.",
            "chunks": [
                {"text": " Mr.", "timestamp": (0.0, 0.74)},
                {"text": " Quilter", "timestamp": (0.74, 1.04)},
                {"text": " is", "timestamp": (1.04, 1.3)},
                {"text": " the", "timestamp": (1.3, 1.44)},
                {"text": " apostle", "timestamp": (1.44, 1.74)},
                {"text": " of", "timestamp": (1.74, 2.18)},
                {"text": " the", "timestamp": (2.18, 2.28)},
                {"text": " middle", "timestamp": (2.28, 2.5)},
                {"text": " classes,", "timestamp": (2.5, 3.0)},
                {"text": " and", "timestamp": (3.0, 3.4)},
                {"text": " we", "timestamp": (3.4, 3.5)},
                {"text": " are", "timestamp": (3.5, 3.6)},
                {"text": " glad", "timestamp": (3.6, 3.84)},
                {"text": " to", "timestamp": (3.84, 4.1)},
                {"text": " welcome", "timestamp": (4.1, 4.4)},
                {"text": " his", "timestamp": (4.4, 4.7)},
                {"text": " gospel.", "timestamp": (4.7, 5.34)},
            ],
        }
        # batch size 1: copy the audio sample since pipeline consumes it
        output = pipe(sample.copy(), batch_size=1)
        self.assertDictEqual(output, EXPECTED_OUTPUT)
        # batch size 2: input audio is chunked into smaller pieces so it's testing batching
        output = pipe(sample, batch_size=2)
        self.assertDictEqual(output, EXPECTED_OUTPUT)
@require_torch
@slow
@unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
def test_torch_speech_encoder_decoder(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/s2t-wav2vec2-large-en-de",
feature_extractor="facebook/s2t-wav2vec2-large-en-de",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": 'Ein Mann sagte zum Universum : " Sir, ich existiert! "'})
@slow
@require_torch
def test_simple_wav2vec2(self):
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = asr(waveform)
self.assertEqual(output, {"text": ""})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = asr(audio)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
data = Audio().encode_example(ds[40]["audio"])["bytes"]
output = asr(data)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
@slow
@require_torch
@require_torchaudio
def test_simple_s2t(self):
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-it-st")
tokenizer = AutoTokenizer.from_pretrained("facebook/s2t-small-mustc-en-it-st")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-mustc-en-it-st")
asr = AutomaticSpeechRecognitionPipeline(
model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, max_new_tokens=20
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = asr(waveform)
self.assertEqual(output, {"text": "(Applausi)"})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = asr(audio)
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."})
data = Audio().encode_example(ds[40]["audio"])["bytes"]
output = asr(data)
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."})
    @slow
    @require_torch
    @require_torchaudio
    @unittest.skip("TODO (joao, eustache): this test is failing, find the breaking PR and fix the cause or the test")
    def test_simple_whisper_asr(self):
        """whisper-tiny.en: plain transcription, segment timestamps, word timestamps, and char rejection."""
        speech_recognizer = pipeline(
            task="automatic-speech-recognition",
            model="openai/whisper-tiny.en",
            num_beams=1,
        )
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = ds[0]["audio"]

        # Plain transcription without timestamps.
        output = speech_recognizer(audio)
        self.assertEqual(
            output,
            {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."},
        )

        # Segment-level timestamps: a single chunk covering the utterance.
        output = speech_recognizer(ds[0]["audio"], return_timestamps=True)
        self.assertEqual(
            output,
            {
                "text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.",
                "chunks": [
                    {
                        "text": (
                            " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."
                        ),
                        "timestamp": (0.0, 5.44),
                    }
                ],
            },
        )

        # Word-level timestamps need alignment heads set on the generation config.
        speech_recognizer.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]]
        output = speech_recognizer(ds[0]["audio"], return_timestamps="word")
        # fmt: off
        self.assertEqual(
            output,
            {
                'text': ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.',
                'chunks': [
                    {'text': ' Mr.', 'timestamp': (0.38, 1.04)},
                    {'text': ' Quilter', 'timestamp': (1.04, 1.18)},
                    {'text': ' is', 'timestamp': (1.18, 1.44)},
                    {'text': ' the', 'timestamp': (1.44, 1.58)},
                    {'text': ' apostle', 'timestamp': (1.58, 1.98)},
                    {'text': ' of', 'timestamp': (1.98, 2.32)},
                    {'text': ' the', 'timestamp': (2.32, 2.46)},
                    {'text': ' middle', 'timestamp': (2.46, 2.56)},
                    {'text': ' classes,', 'timestamp': (2.56, 3.4)},
                    {'text': ' and', 'timestamp': (3.4, 3.54)},
                    {'text': ' we', 'timestamp': (3.54, 3.62)},
                    {'text': ' are', 'timestamp': (3.62, 3.72)},
                    {'text': ' glad', 'timestamp': (3.72, 4.0)},
                    {'text': ' to', 'timestamp': (4.0, 4.26)},
                    {'text': ' welcome', 'timestamp': (4.26, 4.56)},
                    {'text': ' his', 'timestamp': (4.56, 4.92)},
                    {'text': ' gospel.', 'timestamp': (4.92, 5.84)}
                ]
            }
        )
        # fmt: on

        # Whisper can only predict segment level timestamps or word level, not character level
        with self.assertRaisesRegex(
            ValueError,
            "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
            "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$",
        ):
            _ = speech_recognizer(audio, return_timestamps="char")
@slow
@require_torch
@require_torchaudio
def test_simple_whisper_translation(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-large",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."})
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large")
feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large")
speech_recognizer_2 = AutomaticSpeechRecognitionPipeline(
model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, max_new_tokens=20
)
output_2 = speech_recognizer_2(ds[40]["audio"])
self.assertEqual(output, output_2)
# either use generate_kwargs or set the model's generation_config
# model.generation_config.task = "transcribe"
# model.generation_config.lang = "<|it|>"
speech_translator = AutomaticSpeechRecognitionPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
generate_kwargs={"task": "transcribe", "language": "<|it|>"},
max_new_tokens=20,
)
output_3 = speech_translator(ds[40]["audio"])
self.assertEqual(output_3, {"text": " Un uomo ha detto all'universo, Sir, esiste."})
@slow
@require_torch
def test_whisper_language(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny.en",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds[0]["audio"]
# 1. English-only model compatible with no language argument
output = speech_recognizer(audio)
self.assertEqual(
output,
{"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."},
)
# 2. English-only Whisper does not accept the language argument
with self.assertRaisesRegex(
ValueError,
"Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, "
"pass `is_multilingual=True` to generate, or update the generation config.",
):
_ = speech_recognizer(ds[0]["audio"], generate_kwargs={"language": "en"})
# 3. Multilingual model accepts language argument
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
)
output = speech_recognizer(ds[0]["audio"], generate_kwargs={"language": "en"})
self.assertEqual(
output,
{"text": " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."},
)
@slow
def test_speculative_decoding_whisper_non_distil(self):
# Load data:
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:1]")
sample = dataset[0]["audio"].get_all_samples().data
# Load model:
model_id = "openai/whisper-large-v2"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_id,
use_safetensors=True,
device_map="auto",
)
# Load assistant:
assistant_model_id = "openai/whisper-tiny"
assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained(
assistant_model_id,
use_safetensors=True,
device_map="auto",
)
# Load pipeline:
pipe = AutomaticSpeechRecognitionPipeline(
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
generate_kwargs={"language": "en"},
max_new_tokens=21,
num_beams=1,
)
transcription_ass = pipe(sample.clone().detach(), generate_kwargs={"assistant_model": assistant_model})["text"]
transcription_non_ass = pipe(sample)["text"]
self.assertEqual(transcription_ass, transcription_non_ass)
self.assertEqual(
transcription_ass,
" Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.",
)
@slow
def test_speculative_decoding_whisper_distil(self):
# Load data:
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:1]")
sample = dataset[0]["audio"]
# Load model:
model_id = "openai/whisper-large-v2"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_id,
use_safetensors=True,
device_map="auto",
)
# Load assistant:
assistant_model_id = "distil-whisper/distil-large-v2"
assistant_model = AutoModelForCausalLM.from_pretrained(
assistant_model_id,
use_safetensors=True,
device_map="auto",
)
# Load pipeline:
pipe = AutomaticSpeechRecognitionPipeline(
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
generate_kwargs={"language": "en"},
max_new_tokens=21,
num_beams=1,
)
transcription_non_ass = pipe(sample, generate_kwargs={"assistant_model": assistant_model})["text"]
transcription_ass = pipe(sample)["text"]
self.assertEqual(transcription_ass, transcription_non_ass)
self.assertEqual(
transcription_ass,
" Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.",
)
@slow
@require_torch
@require_torchaudio
def test_xls_r_to_en(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-xls-r-1b-21-to-en",
feature_extractor="facebook/wav2vec2-xls-r-1b-21-to-en",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": "A man said to the universe: “Sir, I exist."})
@slow
@require_torch
@require_torchaudio
def test_xls_r_from_en(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-xls-r-1b-en-to-15",
feature_extractor="facebook/wav2vec2-xls-r-1b-en-to-15",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": "Ein Mann sagte zu dem Universum, Sir, ich bin da."})
@slow
@require_torch
@require_torchaudio
def test_speech_to_text_leveraged(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/wav2vec2-2-bart-base",
feature_extractor="patrickvonplaten/wav2vec2-2-bart-base",
tokenizer=AutoTokenizer.from_pretrained("patrickvonplaten/wav2vec2-2-bart-base"),
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]
output = speech_recognizer(audio)
self.assertEqual(output, {"text": "a man said to the universe sir i exist"})
@slow
@require_torch_accelerator
def test_wav2vec2_conformer_float16(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-conformer-rope-large-960h-ft",
device=torch_device,
dtype=torch.float16,
)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = dataset[0]["audio"]
output = speech_recognizer(sample)
self.assertEqual(
output,
{"text": "MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL"},
)
@require_torch
def test_chunking_fast(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-internal-testing/tiny-random-wav2vec2",
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ANY(str)}])
self.assertEqual(output[0]["text"][:6], "ZBT ZC")
@require_torch
def test_input_parameter_passthrough(self):
"""Test that chunked vs non chunked versions of ASR pipelines returns the same structure for the same inputs."""
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-internal-testing/tiny-random-wav2vec2",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
inputs = {"raw": audio, "sampling_rate": 16_000, "id": 1}
chunked_output = speech_recognizer(inputs.copy(), chunk_length_s=30)
non_chunked_output = speech_recognizer(inputs.copy())
assert chunked_output.keys() == non_chunked_output.keys(), (
"The output structure should be the same for chunked vs non-chunked versions of asr pipelines."
)
    @require_torch
    def test_return_timestamps_ctc_fast(self):
        """Char- and word-level timestamp output from a tiny random CTC model."""
        speech_recognizer = pipeline(
            task="automatic-speech-recognition",
            model="hf-internal-testing/tiny-random-wav2vec2",
        )
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
        # Take short audio to keep the test readable
        audio = ds[40]["audio"]["array"][:800]

        # Char level: one chunk per decoded character, spaces included.
        output = speech_recognizer(audio, return_timestamps="char")
        self.assertEqual(
            output,
            {
                "text": "ZBT ZX G",
                "chunks": [
                    {"text": " ", "timestamp": (0.0, 0.012)},
                    {"text": "Z", "timestamp": (0.012, 0.016)},
                    {"text": "B", "timestamp": (0.016, 0.02)},
                    {"text": "T", "timestamp": (0.02, 0.024)},
                    {"text": " ", "timestamp": (0.024, 0.028)},
                    {"text": "Z", "timestamp": (0.028, 0.032)},
                    {"text": "X", "timestamp": (0.032, 0.036)},
                    {"text": " ", "timestamp": (0.036, 0.04)},
                    {"text": "G", "timestamp": (0.04, 0.044)},
                ],
            },
        )

        # Word level: chunks span the characters between space boundaries.
        output = speech_recognizer(audio, return_timestamps="word")
        self.assertEqual(
            output,
            {
                "text": "ZBT ZX G",
                "chunks": [
                    {"text": "ZBT", "timestamp": (0.012, 0.024)},
                    {"text": "ZX", "timestamp": (0.028, 0.036)},
                    {"text": "G", "timestamp": (0.04, 0.044)},
                ],
            },
        )
@require_torch
@require_pyctcdecode
def test_chunking_fast_with_lm(self):
speech_recognizer = pipeline(
model="hf-internal-testing/processor_with_lm",
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
# Batch_size = 1
output1 = speech_recognizer([audio_tiled], batch_size=1)
self.assertEqual(output1, [{"text": ANY(str)}])
self.assertEqual(output1[0]["text"][:6], "<s> <s")
# batch_size = 2
output2 = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output2, [{"text": ANY(str)}])
self.assertEqual(output2[0]["text"][:6], "<s> <s")
# TODO There is an offby one error because of the ratio.
# Maybe logits get affected by the padding on this random
# model is more likely. Add some masking ?
# self.assertEqual(output1, output2)
@require_torch
@require_pyctcdecode
def test_with_lm_fast(self):
speech_recognizer = pipeline(
model="hf-internal-testing/processor_with_lm",
)
self.assertEqual(speech_recognizer.type, "ctc_with_lm")
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ANY(str)}])
self.assertEqual(output[0]["text"][:6], "<s> <s")
# Making sure the argument are passed to the decoder
# Since no change happens in the result, check the error comes from
# the `decode_beams` function.
with self.assertRaises(TypeError) as e:
output = speech_recognizer([audio_tiled], decoder_kwargs={"num_beams": 2})
self.assertContains(e.msg, "TypeError: decode_beams() got an unexpected keyword argument 'num_beams'")
output = speech_recognizer([audio_tiled], decoder_kwargs={"beam_width": 2})
@require_torch
@require_pyctcdecode
def test_with_local_lm_fast(self):
local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model=local_dir,
)
self.assertEqual(speech_recognizer.type, "ctc_with_lm")
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ANY(str)}])
self.assertEqual(output[0]["text"][:6], "<s> <s")
    @require_torch
    @slow
    def test_whisper_prompted(self):
        """Prompting whisper-tiny with a misspelling steers the transcription toward that spelling."""
        processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
        model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
        model = model.to(torch_device)

        pipe = pipeline(
            "automatic-speech-recognition",
            model=model,
            tokenizer=processor.tokenizer,
            feature_extractor=processor.feature_extractor,
            max_new_tokens=128,
            chunk_length_s=30,
            batch_size=16,
            num_beams=1,
        )

        dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
        sample = dataset[0]["audio"].get_all_samples().data

        # prompt the model to misspell "Mr Quilter" as "Mr Quillter"
        whisper_prompt = "Mr. Quillter."
        prompt_ids = pipe.tokenizer.get_prompt_ids(whisper_prompt, return_tensors="pt").to(torch_device)
        # Clone before the first run — presumably the pipeline consumes the tensor (TODO confirm).
        unprompted_result = pipe(sample.clone().detach())["text"]
        prompted_result = pipe(sample, generate_kwargs={"prompt_ids": prompt_ids})["text"]

        # fmt: off
        EXPECTED_UNPROMPTED_RESULT = " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similarly drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins work is really Greek after all and can discover in it but little of rocky Ithaca. Lennils, pictures are a sort of upguards and atom paintings and Mason's exquisite itals are as national as a jingo poem. Mr. Birkut Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. And Mr. John Collier gives his sitter a cheerful slap on the back before he says like a shampoo or a Turkish bath. Next man"
        EXPECTED_PROMPTED_RESULT = " Mr. Quillter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quillter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similarly drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins work is really great after all, and can discover in it but little of rocky Ithaca. Lennils, pictures are a sort of upguards and atom paintings, and Mason's exquisite itals are as national as a jingo poem. Mr. Birkut Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. Mr. John Collier gives his sitter a cheerful slap on the back before he says like a shampoo or a Turkish bath. Next man."
        # fmt: on

        self.assertEqual(unprompted_result, EXPECTED_UNPROMPTED_RESULT)
        self.assertEqual(prompted_result, EXPECTED_PROMPTED_RESULT)
@require_torch
@slow
def test_whisper_longform(self):
# fmt: off
EXPECTED_RESULTS = Expectations(
{
(None, None): " Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on Saturday, Rusty Cargo, container down by the Wharf, and challenge toothless drifters to the godless bughouse lets of tournament that is my segment. MUSIC Meanwhile!",
("xpu", None): " Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting of classics, Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a Fisher shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I... APPLAUSE Sometimes I... Startle away, upside down on the monkey bars of a condemned playground on a superfund site. Get all heaped up on goofballs, rummaged that would discard a tag bag of defective toys, yank out a fist bowl of disembodied doll limbs, toss them on a stain kid's place mat from a defunct denys, set up a table inside a rusty cargo container down by the Wharf and challenge toothless drifters to the godless bug house blitz of tournament that is my segment.",
}
)
# fmt: on
EXPECTED_RESULT = EXPECTED_RESULTS.get_expectation()
processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
model = model.to(torch_device)
pipe = pipeline(
"automatic-speech-recognition",
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
max_new_tokens=128,
device=torch_device,
return_timestamps=True, # to allow longform generation
num_beams=1,
)
ds = load_dataset("distil-whisper/meanwhile", "default")["test"]
ds = ds.cast_column("audio", Audio(sampling_rate=16000))
audio = ds[:1]["audio"]
result = pipe(audio)[0]["text"]
assert result == EXPECTED_RESULT
@require_torch
@slow
def test_seamless_v2(self):
pipe = pipeline(
"automatic-speech-recognition",
model="facebook/seamless-m4t-v2-large",
device=torch_device,
)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = dataset[0]["audio"]
result = pipe(sample, generate_kwargs={"tgt_lang": "eng"})
EXPECTED_RESULT = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel"
assert result["text"] == EXPECTED_RESULT
@require_torch
@slow
def test_chunking_and_timestamps(self):
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 10
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ("A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats).strip()}])
output = speech_recognizer(audio, return_timestamps="char")
self.assertEqual(audio.shape, (74_400,))
self.assertEqual(speech_recognizer.feature_extractor.sampling_rate, 16_000)
# The audio is 74_400 / 16_000 = 4.65s long.
self.assertEqual(
output,
{
"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST",
"chunks": [
{"text": "A", "timestamp": (0.6, 0.62)},
{"text": " ", "timestamp": (0.62, 0.66)},
{"text": "M", "timestamp": (0.68, 0.7)},
{"text": "A", "timestamp": (0.78, 0.8)},
{"text": "N", "timestamp": (0.84, 0.86)},
{"text": " ", "timestamp": (0.92, 0.98)},
{"text": "S", "timestamp": (1.06, 1.08)},
{"text": "A", "timestamp": (1.14, 1.16)},
{"text": "I", "timestamp": (1.16, 1.18)},
{"text": "D", "timestamp": (1.2, 1.24)},
{"text": " ", "timestamp": (1.24, 1.28)},
{"text": "T", "timestamp": (1.28, 1.32)},
{"text": "O", "timestamp": (1.34, 1.36)},
{"text": " ", "timestamp": (1.38, 1.42)},
{"text": "T", "timestamp": (1.42, 1.44)},
{"text": "H", "timestamp": (1.44, 1.46)},
{"text": "E", "timestamp": (1.46, 1.5)},
{"text": " ", "timestamp": (1.5, 1.56)},
{"text": "U", "timestamp": (1.58, 1.62)},
{"text": "N", "timestamp": (1.64, 1.68)},
{"text": "I", "timestamp": (1.7, 1.72)},
{"text": "V", "timestamp": (1.76, 1.78)},
{"text": "E", "timestamp": (1.84, 1.86)},
{"text": "R", "timestamp": (1.86, 1.9)},
{"text": "S", "timestamp": (1.96, 1.98)},
{"text": "E", "timestamp": (1.98, 2.02)},
{"text": " ", "timestamp": (2.02, 2.06)},
{"text": "S", "timestamp": (2.82, 2.86)},
{"text": "I", "timestamp": (2.94, 2.96)},
{"text": "R", "timestamp": (2.98, 3.02)},
{"text": " ", "timestamp": (3.06, 3.12)},
{"text": "I", "timestamp": (3.5, 3.52)},
{"text": " ", "timestamp": (3.58, 3.6)},
{"text": "E", "timestamp": (3.66, 3.68)},
{"text": "X", "timestamp": (3.68, 3.7)},
{"text": "I", "timestamp": (3.9, 3.92)},
{"text": "S", "timestamp": (3.94, 3.96)},
{"text": "T", "timestamp": (4.0, 4.02)},
{"text": " ", "timestamp": (4.06, 4.1)},
],
},
)
output = speech_recognizer(audio, return_timestamps="word")
self.assertEqual(
output,
{
"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST",
"chunks": [
{"text": "A", "timestamp": (0.6, 0.62)},
{"text": "MAN", "timestamp": (0.68, 0.86)},
{"text": "SAID", "timestamp": (1.06, 1.24)},
{"text": "TO", "timestamp": (1.28, 1.36)},
{"text": "THE", "timestamp": (1.42, 1.5)},
{"text": "UNIVERSE", "timestamp": (1.58, 2.02)},
{"text": "SIR", "timestamp": (2.82, 3.02)},
{"text": "I", "timestamp": (3.5, 3.52)},
{"text": "EXIST", "timestamp": (3.66, 4.02)},
],
},
)
output = speech_recognizer(audio, return_timestamps="word", chunk_length_s=2.0)
self.assertEqual(
output,
{
"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST",
"chunks": [
{"text": "A", "timestamp": (0.6, 0.62)},
{"text": "MAN", "timestamp": (0.68, 0.86)},
{"text": "SAID", "timestamp": (1.06, 1.24)},
{"text": "TO", "timestamp": (1.3, 1.36)},
{"text": "THE", "timestamp": (1.42, 1.48)},
{"text": "UNIVERSE", "timestamp": (1.58, 2.02)},
# Tiny change linked to chunking.
{"text": "SIR", "timestamp": (2.84, 3.02)},
{"text": "I", "timestamp": (3.5, 3.52)},
{"text": "EXIST", "timestamp": (3.66, 4.02)},
],
},
)
# CTC models must specify return_timestamps type - cannot set `return_timestamps=True` blindly
with self.assertRaisesRegex(
ValueError,
"^CTC can either predict character level timestamps, or word level timestamps. "
"Set `return_timestamps='char'` or `return_timestamps='word'` as required.$",
):
_ = speech_recognizer(audio, return_timestamps=True)
@require_torch
@slow
def test_chunking_with_lm(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/wav2vec2-base-100h-with-lm",
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 10
audio = np.tile(audio, n_repeats)
output = speech_recognizer([audio], batch_size=2)
expected_text = "A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats
expected = [{"text": expected_text.strip()}]
self.assertEqual(output, expected)
@require_torch
def test_chunk_iterator(self):
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
inputs = torch.arange(100).long()
outs = list(chunk_iter(inputs, feature_extractor, 100, 0, 0))
self.assertEqual(len(outs), 1)
self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)])
self.assertEqual([o["is_last"] for o in outs], [True])
# two chunks no stride
outs = list(chunk_iter(inputs, feature_extractor, 50, 0, 0))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(50, 0, 0), (50, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 50), (1, 50)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
# two chunks incomplete last
outs = list(chunk_iter(inputs, feature_extractor, 80, 0, 0))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(80, 0, 0), (20, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 20)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
# one chunk since first is also last, because it contains only data
# in the right strided part we just mark that part as non stride
# This test is specifically crafted to trigger a bug if next chunk
# would be ignored by the fact that all the data would be
# contained in the strided left data.
outs = list(chunk_iter(inputs, feature_extractor, 105, 5, 5))
self.assertEqual(len(outs), 1)
self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)])
self.assertEqual([o["is_last"] for o in outs], [True])
@require_torch
def test_chunk_iterator_stride(self):
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
inputs = torch.arange(100).long()
input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[
"input_values"
]
outs = list(chunk_iter(inputs, feature_extractor, 100, 20, 10))
self.assertEqual(len(outs), 1)
self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)])
self.assertEqual([o["is_last"] for o in outs], [True])
outs = list(chunk_iter(inputs, feature_extractor, 80, 20, 10))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(80, 0, 10), (50, 20, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 50)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
outs = list(chunk_iter(inputs, feature_extractor, 90, 20, 0))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(90, 0, 0), (30, 20, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 90), (1, 30)])
outs = list(chunk_iter(inputs, feature_extractor, 36, 6, 6))
self.assertEqual(len(outs), 4)
self.assertEqual([o["stride"] for o in outs], [(36, 0, 6), (36, 6, 6), (36, 6, 6), (28, 6, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 36), (1, 36), (1, 36), (1, 28)])
inputs = torch.LongTensor([i % 2 for i in range(100)])
input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[
"input_values"
]
outs = list(chunk_iter(inputs, feature_extractor, 30, 5, 5))
self.assertEqual(len(outs), 5)
self.assertEqual([o["stride"] for o in outs], [(30, 0, 5), (30, 5, 5), (30, 5, 5), (30, 5, 5), (20, 5, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 30), (1, 30), (1, 30), (1, 30), (1, 20)])
self.assertEqual([o["is_last"] for o in outs], [False, False, False, False, True])
# (0, 25)
self.assertEqual(nested_simplify(input_values[:, :30]), nested_simplify(outs[0]["input_values"]))
# (25, 45)
self.assertEqual(nested_simplify(input_values[:, 20:50]), nested_simplify(outs[1]["input_values"]))
# (45, 65)
self.assertEqual(nested_simplify(input_values[:, 40:70]), nested_simplify(outs[2]["input_values"]))
# (65, 85)
self.assertEqual(nested_simplify(input_values[:, 60:90]), nested_simplify(outs[3]["input_values"]))
# (85, 100)
self.assertEqual(nested_simplify(input_values[:, 80:100]), nested_simplify(outs[4]["input_values"]))
@require_torch
def test_stride(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-internal-testing/tiny-random-wav2vec2",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 10)
output = speech_recognizer({"raw": waveform, "stride": (0, 0), "sampling_rate": 16_000})
self.assertEqual(output, {"text": "OB XB B EB BB B EB B OB X"})
# 0 effective ids Just take the middle one
output = speech_recognizer({"raw": waveform, "stride": (5000, 5000), "sampling_rate": 16_000})
self.assertEqual(output, {"text": ""})
# Only 1 arange.
output = speech_recognizer({"raw": waveform, "stride": (0, 9000), "sampling_rate": 16_000})
self.assertEqual(output, {"text": "OB"})
# 2nd arange
output = speech_recognizer({"raw": waveform, "stride": (1000, 8000), "sampling_rate": 16_000})
self.assertEqual(output, {"text": "XB"})
@slow
@require_torch_accelerator
def test_slow_unfinished_sequence(self):
from transformers import GenerationConfig
pipe = pipeline(
"automatic-speech-recognition",
model="vasista22/whisper-hindi-large-v2",
device=torch_device,
)
# the audio is 4 seconds long
audio = hf_hub_download("Narsil/asr_dummy", filename="hindi.ogg", repo_type="dataset")
# Original model wasn't trained with timestamps and has incorrect generation config
out = pipe(
audio,
return_timestamps=True,
generate_kwargs={"generation_config": GenerationConfig.from_pretrained("openai/whisper-large-v2")},
)
self.assertEqual(
out,
{
"text": "मिर्ची में कितने विभिन्न प्रजातियां हैं",
"chunks": [{"timestamp": (0.58, None), "text": "मिर्ची में कितने विभिन्न प्रजातियां हैं"}],
},
)
@require_torch
def test_pipeline_assisted_generation(self):
"""Tests that we can run assisted generation in the pipeline"""
model = "openai/whisper-tiny"
pipe = pipeline("automatic-speech-recognition", model=model, assistant_model=model)
# We can run the pipeline
prompt = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:1]")[0]["audio"]
_ = pipe(prompt, generate_kwargs={"num_beams": 1})
# It is running assisted generation under the hood (e.g. flags incompatible with assisted gen will crash)
with self.assertRaises(TypeError):
_ = pipe(prompt, generate_kwargs={"num_beams": 2})
@require_torch
def test_pipeline_generation_kwargs(self):
"""Tests that we can pass kwargs to `generate`, as in the text generation pipelines"""
model = "openai/whisper-tiny"
asr = pipeline("automatic-speech-recognition", model=model)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:1]")
# BC: with `generate_kwargs` as a dictionary
res = asr(
dataset[0]["audio"],
generate_kwargs={"task": "transcribe", "max_new_tokens": 256},
)
self.assertEqual(
res["text"], " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."
)
# New: kwargs forwarded to `generate`
res = asr(
dataset[0]["audio"],
max_new_tokens=256,
task="transcribe",
)
self.assertEqual(
res["text"], " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."
)
def require_ffmpeg(test_case):
"""
Decorator marking a test that requires FFmpeg.
These tests are skipped when FFmpeg isn't installed.
"""
import subprocess
try:
subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL)
return test_case
except Exception:
return unittest.skip(reason="test requires ffmpeg")(test_case)
def bytes_iter(chunk_size, chunks):
for i in range(chunks):
yield bytes(range(i * chunk_size, (i + 1) * chunk_size))
@require_ffmpeg
| AutomaticSpeechRecognitionPipelineTests |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 11680,
"end": 11977
} | class ____(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
| ConfigParserInterpolation |
python | ray-project__ray | python/ray/_private/ray_perf.py | {
"start": 346,
"end": 572
} | class ____:
def small_value(self):
return b"ok"
def small_value_arg(self, x):
return b"ok"
def small_value_batch(self, n):
ray.get([small_value.remote() for _ in range(n)])
@ray.remote
| Actor |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_parameters.py | {
"start": 493,
"end": 2427
} | class ____(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` H0 on a Cosmology.
H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_H0(self, cosmo_cls: type[Cosmology], cosmo: Cosmology):
"""Test Parameter ``H0``."""
unit = u.Unit("km/(s Mpc)")
# on the class
H0 = cosmo_cls.parameters["H0"]
assert isinstance(H0, Parameter)
assert "Hubble constant" in H0.__doc__
assert H0.unit == unit
assert H0.default is MISSING
# validation
assert H0.validate(cosmo, 1) == 1 * unit
assert H0.validate(cosmo, 10 * unit) == 10 * unit
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
H0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.H0 is cosmo.__dict__["H0"]
assert cosmo.H0 == self._cls_args["H0"]
assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit
def test_init_H0(self, cosmo_cls: type[Cosmology], ba: BoundArguments):
"""Test initialization for values of ``H0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0 == ba.arguments["H0"]
# also without units
ba.arguments["H0"] = ba.arguments["H0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0.value == ba.arguments["H0"]
# fails for non-scalar
ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc)
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
# =============================================================================
| ParameterH0TestMixin |
python | scipy__scipy | scipy/optimize/tests/test_trustregion.py | {
"start": 273,
"end": 571
} | class ____:
""" This is for testing callbacks."""
def __init__(self):
self.count = 0
self.accum = None
def __call__(self, x):
self.count += 1
if self.accum is None:
self.accum = np.array(x)
else:
self.accum += x
| Accumulator |
python | urllib3__urllib3 | test/with_dummyserver/test_https.py | {
"start": 46921,
"end": 51798
} | class ____:
def test_can_validate_san(self, san_server: ServerConfig) -> None:
"""Ensure that urllib3 can validate SANs with IP addresses in them."""
with HTTPSConnectionPool(
san_server.host,
san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=san_server.ca_certs,
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
def test_common_name_without_san_fails(self, no_san_server: ServerConfig) -> None:
with HTTPSConnectionPool(
no_san_server.host,
no_san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server.ca_certs,
) as https_pool:
with pytest.raises(
MaxRetryError,
) as e:
https_pool.request("GET", "/")
assert (
"mismatch, certificate is not valid" in str(e.value)
or "no appropriate subjectAltName" in str(e.value)
or "Empty Subject Alternative Name extension" in str(e.value)
)
def test_common_name_without_san_with_different_common_name(
self, no_san_server_with_different_commmon_name: ServerConfig
) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context(verify_flags=0)
try:
ctx.hostname_checks_common_name = True
except AttributeError:
pytest.skip("Couldn't set 'SSLContext.hostname_checks_common_name'")
with HTTPSConnectionPool(
no_san_server_with_different_commmon_name.host,
no_san_server_with_different_commmon_name.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server_with_different_commmon_name.ca_certs,
ssl_context=ctx,
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert "mismatch, certificate is not valid for 'localhost'" in str(
e.value
) or "hostname 'localhost' doesn't match 'example.com'" in str(e.value)
@pytest.mark.parametrize("use_assert_hostname", [True, False])
def test_hostname_checks_common_name_respected(
self, no_san_server: ServerConfig, use_assert_hostname: bool
) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context(verify_flags=0)
if not hasattr(ctx, "hostname_checks_common_name"):
pytest.skip("Test requires 'SSLContext.hostname_checks_common_name'")
ctx.load_verify_locations(no_san_server.ca_certs)
try:
ctx.hostname_checks_common_name = True
except AttributeError:
pytest.skip("Couldn't set 'SSLContext.hostname_checks_common_name'")
err: MaxRetryError | None
try:
with HTTPSConnectionPool(
no_san_server.host,
no_san_server.port,
cert_reqs="CERT_REQUIRED",
ssl_context=ctx,
assert_hostname=no_san_server.host if use_assert_hostname else None,
) as https_pool:
https_pool.request("GET", "/")
except MaxRetryError as e:
err = e
else:
err = None
# commonName is only valid for DNS names, not IP addresses.
if no_san_server.host == "localhost":
assert err is None
# IP addresses should fail for commonName.
else:
assert err is not None
assert type(err.reason) is SSLError
assert isinstance(
err.reason.args[0], (ssl.SSLCertVerificationError, CertificateError)
)
def test_assert_hostname_invalid_san(
self, no_localhost_san_server: ServerConfig
) -> None:
"""Ensure SAN errors are not raised while assert_hostname is false"""
with HTTPSConnectionPool(
no_localhost_san_server.host,
no_localhost_san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_localhost_san_server.ca_certs,
assert_hostname=False,
) as https_pool:
https_pool.request("GET", "/")
def test_assert_hostname_invalid_cn(
self, no_san_server_with_different_commmon_name: ServerConfig
) -> None:
"""Ensure CN errors are not raised while assert_hostname is false"""
ctx = urllib3.util.ssl_.create_urllib3_context(verify_flags=0)
with HTTPSConnectionPool(
no_san_server_with_different_commmon_name.host,
no_san_server_with_different_commmon_name.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server_with_different_commmon_name.ca_certs,
ssl_context=ctx,
assert_hostname=False,
) as https_pool:
https_pool.request("GET", "/")
| TestHTTPS_Hostname |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 114499,
"end": 115200
} | class ____(nn.Module):
"""
Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower
dimension.
Args:
in_channels (`int`):
Number of channels in the input.
out_channels (`int`):
Number of channels in the output.
"""
def __init__(self, in_channels: int, out_channels: int) -> None:
super().__init__()
self.classifier = nn.Linear(in_channels, out_channels)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return logits
| PerceiverProjectionPostprocessor |
python | PyCQA__pylint | tests/functional/r/redundant_unittest_assert.py | {
"start": 457,
"end": 1284
} | class ____(unittest.TestCase):
def test_something(self):
''' Simple test '''
some_var = 'It should be assertEqual'
# +1:[redundant-unittest-assert]
self.assertTrue('I meant assertEqual not assertTrue', some_var)
# +1:[redundant-unittest-assert]
self.assertFalse('I meant assertEqual not assertFalse', some_var)
# +1:[redundant-unittest-assert]
self.assertTrue(True, some_var)
# +1:[redundant-unittest-assert]
self.assertFalse(False, some_var)
# +1:[redundant-unittest-assert]
self.assertFalse(None, some_var)
# +1:[redundant-unittest-assert]
self.assertTrue(0, some_var)
self.assertTrue('should be' in some_var, some_var)
self.assertTrue(some_var, some_var)
@unittest.skip("don't run this")
| Tests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/sqlite/pysqlite.py | {
"start": 16828,
"end": 17415
} | class ____(DATE):
def bind_processor( # type: ignore[override]
self, dialect: SQLiteDialect
) -> Optional[_BindProcessorType[Any]]:
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor( # type: ignore[override]
self, dialect: SQLiteDialect, coltype: object
) -> Optional[_ResultProcessorType[Any]]:
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
| _SQLite_pysqliteDate |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_state_dict.py | {
"start": 4838,
"end": 50917
} | class ____(FSDPTest):
@property
def world_size(self):
return min(torch.accelerator.device_count(), 2)
def _broadcast_state_dict(self, state_dict):
return _broadcast_state_dict(self.rank, state_dict)
def _state_compare(self, model, model_new, assert_fn, state_generator="parameters"):
state_base = list(getattr(model, state_generator)())
state_new = list(getattr(model_new, state_generator)())
# Regardless of `assert_fn`, the number of parameters should be the same
self.assertEqual(len(state_base), len(state_new))
assert_fn(state_base, state_new)
def _compare_models(
self, model, model_new, assert_fn, check_fp16=False, check_buffers=True
):
assert assert_fn in (self.assertEqual, self.assertNotEqual)
with FSDP.summon_full_params(model):
with FSDP.summon_full_params(model_new):
self._state_compare(model, model_new, assert_fn)
if check_buffers:
has_buffers = any(
len(list(m.buffers())) for m in (model, model_new)
)
if has_buffers:
self._state_compare(
model, model_new, assert_fn, state_generator="buffers"
)
if check_fp16:
for tensor in model_new.parameters():
self.assertEqual(tensor.dtype, torch.float16)
def _get_simple_nested_model(
self, *fsdp_args, wrap=True, checkpoint_wrap=False, **fsdp_kwargs
):
if wrap:
lin1 = nn.Linear(10, 10, bias=False).to(device_type)
lin2 = nn.Linear(10, 10, bias=False).to(device_type)
if checkpoint_wrap:
lin1 = checkpoint_wrapper(lin1)
lin2 = checkpoint_wrapper(lin2)
seq = nn.Sequential(FSDP(lin1, *fsdp_args, **fsdp_kwargs), lin2)
if checkpoint_wrap:
seq = checkpoint_wrapper(seq)
model = FSDP(seq, *fsdp_args, **fsdp_kwargs)
else:
model = nn.Sequential(
nn.Linear(10, 10, bias=False).to(device_type),
nn.Linear(10, 10, bias=False).to(device_type),
)
return model
def _get_simple_model(self, *fsdp_args, checkpoint_wrap=False, **fsdp_kwargs):
lin = nn.Linear(10, 10, bias=False).to(device_type)
if checkpoint_wrap:
lin = checkpoint_wrapper(lin)
model = FSDP(lin, *fsdp_args, **fsdp_kwargs)
return model
def _get_multibuffer_nested_model(
self, *fsdp_args, wrap=True, checkpoint_wrap=False, **fsdp_kwargs
):
full_p = torch.float32
lin_mp = fsdp_kwargs.pop("mixed_precision", None)
bn_mp = (
MixedPrecision(param_dtype=full_p, reduce_dtype=full_p, buffer_dtype=full_p)
if lin_mp
else None
)
if wrap:
lin1 = nn.Linear(10, 10, bias=False).to(device_type)
bn1 = nn.BatchNorm1d(10).to(device_type)
lin2 = nn.Linear(10, 10, bias=False).to(device_type)
if checkpoint_wrap:
lin1 = checkpoint_wrapper(lin1)
bn1 = checkpoint_wrapper(bn1)
lin2 = checkpoint_wrapper(lin2)
seq = nn.Sequential(
FSDP(lin1, *fsdp_args, mixed_precision=lin_mp, **fsdp_kwargs),
FSDP(bn1, *fsdp_args, mixed_precision=bn_mp, **fsdp_kwargs),
lin2,
)
if checkpoint_wrap:
seq = checkpoint_wrapper(seq)
model = FSDP(seq, *fsdp_args, **fsdp_kwargs)
else:
model = nn.Sequential(
nn.Linear(10, 10, bias=False).to(device_type),
nn.BatchNorm1d(10).to(device_type),
nn.Linear(10, 10, bias=False).to(device_type),
)
return model
def _get_non_fsdp_root_module(self, *fsdp_args, wrap=True, **fsdp_kwargs):
class FSDPContainer(nn.Module):
def __init__(self, fsdp_1, fsdp_2):
super().__init__()
self.non_fsdp_lin = nn.Linear(10, 10, bias=False).to(device_type)
self.fsdp_1 = fsdp_1
self.fsdp_2 = fsdp_2
def forward(self, x):
x = self.non_fsdp_lin(x)
x = self.fsdp_1(x)
x = self.fsdp_2(x)
return x
return FSDPContainer(
self._get_simple_nested_model(*fsdp_args, wrap=wrap, **fsdp_kwargs),
self._get_simple_nested_model(*fsdp_args, wrap=wrap, **fsdp_kwargs),
)
def _get_state_dict_mgr(
self,
model: nn.Module,
state_dict_type: str,
state_dict_rank0_and_offload: bool,
):
_state_dict_type = STATE_DICT_MAPPING[state_dict_type]
if state_dict_type == "state_dict":
config = FullStateDictConfig(
rank0_only=state_dict_rank0_and_offload,
offload_to_cpu=state_dict_rank0_and_offload,
)
elif state_dict_type == "local_state_dict":
config = LocalStateDictConfig(
offload_to_cpu=state_dict_rank0_and_offload,
)
elif state_dict_type == "sharded_state_dict":
config = ShardedStateDictConfig(
offload_to_cpu=state_dict_rank0_and_offload,
)
else:
raise ValueError("Unsupported state_dict_type")
return FSDP.state_dict_type(model, _state_dict_type, config)
def _validate_state_dict_contents(
self, model, fsdp_state_dict, state_dict_rank0_and_offload, ignore_keys=None
):
if state_dict_rank0_and_offload:
if self.rank == 0:
self.assertNotEqual(fsdp_state_dict, {})
for key, tensor in fsdp_state_dict.items():
if ignore_keys and key in ignore_keys:
continue
self.assertEqual(
tensor.device,
torch.device("cpu"),
f"{key} is unexpectedly on device {tensor.device}",
)
else:
# For non-FSDP roots, the non FSDP portion can still have parameters on rank 0,
# so bypass the check for now.
if isinstance(model, FSDP):
self.assertEqual(
fsdp_state_dict,
{},
f"Expected empty state_dict but got {fsdp_state_dict} on rank {dist.get_rank()}",
)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _UNFLATTENED_STATE_DICT_IMPLS)
@parametrize(
"checkpoint_wrap",
["source", "dest", "both", "source_after_wrap", "both_after_wrap"],
)
@parametrize("rank0_only_and_offload", [False, True])
def test_fsdp_state_dict_with_activation_checkpoint(
self, state_dict_type, checkpoint_wrap, rank0_only_and_offload
):
"""Tests saving the state dict, zeroing a target model's parameters, and
loading the state dict, where the source and target models may have a
checkpoint wrapper."""
def apply_ac_to_linears(model) -> None:
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=False,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model,
checkpoint_wrapper_fn=non_reentrant_wrapper,
check_fn=lambda submodule: isinstance(submodule, nn.Linear),
)
for model_call in [
partial(self._get_simple_model),
partial(self._get_simple_nested_model),
]:
model = model_call(checkpoint_wrap=(checkpoint_wrap in ("source", "both")))
if checkpoint_wrap in ("source_after_wrap", "both_after_wrap"):
apply_ac_to_linears(model)
with self._get_state_dict_mgr(
model, state_dict_type, rank0_only_and_offload
):
state_dict = _gather_state_dict(_get_state_dict(model, False, False))
# Possibly wrap new model in activation checkpoint wrapper to test save/
# load with this wrapper
model_new = model_call(
checkpoint_wrap=(checkpoint_wrap in ("dest", "both"))
)
if checkpoint_wrap == "both_after_wrap":
apply_ac_to_linears(model_new)
_zero_model(model_new)
self._compare_models(model, model_new, self.assertNotEqual)
if rank0_only_and_offload:
state_dict = self._broadcast_state_dict(state_dict)
# Would fail if checkpoint_wrapper did not correctly implement state_dict pre/post hooks
model_new.load_state_dict(state_dict, strict=True)
self._compare_models(model, model_new, self.assertEqual)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _UNFLATTENED_STATE_DICT_IMPLS)
@parametrize("rank0_only_and_offload", [False, True])
def test_state_dict_with_manual_ac_wrapper(
self,
state_dict_type: str,
rank0_only_and_offload: bool,
):
"""
Tests saving and loading a state dict for a model manually wrapped with
``FSDP(CheckpointWrapper(module))``, where the ``CheckpointWrapper`` is
wrapped before FSDP.
TODO: Investigate why the test above does not cover everything in this
test and de-duplicate afterwards.
"""
if state_dict_type == "sharded_state_dict" and rank0_only_and_offload:
return # not supported
model_ac = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_BEFORE,
)
# Manually wrap FSDP without AC
model_no_ac = deepcopy(model_ac)
for i, layer in enumerate(model_no_ac.transformer.encoder.layers):
model_no_ac.transformer.encoder.layers[i] = FSDP(layer)
for i, layer in enumerate(model_no_ac.transformer.decoder.layers):
model_no_ac.transformer.decoder.layers[i] = FSDP(layer)
model_no_ac.transformer = FSDP(model_no_ac.transformer)
# Manually wrap FSDP with AC as `FSDP(CheckpointWrapper(module))`
for i, layer in enumerate(model_ac.transformer.encoder.layers):
layer = checkpoint_wrapper(layer)
model_ac.transformer.encoder.layers[i] = FSDP(layer)
for i, layer in enumerate(model_ac.transformer.decoder.layers):
layer = checkpoint_wrapper(layer)
model_ac.transformer.decoder.layers[i] = FSDP(layer)
model_ac.transformer = FSDP(model_ac.transformer)
# Save, load, and compare the two models
with self._get_state_dict_mgr(
model_no_ac, state_dict_type, rank0_only_and_offload
):
state_dict_no_ac = model_no_ac.state_dict()
with self._get_state_dict_mgr(
model_ac, state_dict_type, rank0_only_and_offload
):
state_dict_ac = model_ac.state_dict()
self.assertEqual(state_dict_ac.keys(), state_dict_no_ac.keys())
if rank0_only_and_offload:
state_dict_no_ac = self._broadcast_state_dict(state_dict_no_ac)
state_dict_ac = self._broadcast_state_dict(state_dict_ac)
with self._get_state_dict_mgr(
model_no_ac, state_dict_type, rank0_only_and_offload
):
model_no_ac.load_state_dict(state_dict_no_ac)
with self._get_state_dict_mgr(
model_ac, state_dict_type, rank0_only_and_offload
):
model_ac.load_state_dict(state_dict_ac)
self._compare_models(model_ac, model_no_ac, self.assertEqual)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_state_dict_with_shared_parameters(self, state_dict_type):
auto_wrap_policy = ModuleWrapPolicy(
{TransformerEncoderLayer, TransformerDecoderLayer}
)
model_creator = partial(
TransformerWithSharedParams.init,
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
{"auto_wrap_policy": auto_wrap_policy},
)
fsdp_model = model_creator()
with self._get_state_dict_mgr(fsdp_model, state_dict_type, False):
state_dict = fsdp_model.state_dict()
new_model = model_creator()
_zero_model(new_model, zero_buffers=True)
with self._get_state_dict_mgr(new_model, state_dict_type, False):
new_model.load_state_dict(state_dict)
@skip_if_lt_x_gpu(2)
@parametrize("use_orig_params", [False, True])
def test_state_dict_rank0_offload_save_load_flow(self, use_orig_params: bool):
"""Tests saving a model checkpoint only on rank 0 and loading it only
on rank 0 with ``sync_module_states=True`` to emulate the workflow to
avoid redundant CPU memory usage."""
auto_wrap_policy = ModuleWrapPolicy(
{TransformerEncoderLayer, TransformerDecoderLayer}
)
fsdp_kwargs = {
"auto_wrap_policy": auto_wrap_policy,
"use_orig_params": use_orig_params,
}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs,
)
# Force model parameters and buffers to be nonzero
with FSDP.summon_full_params(fsdp_model):
for tensor in itertools.chain(
fsdp_model.parameters(), fsdp_model.buffers()
):
if torch.count_nonzero(tensor) == 0:
with torch.no_grad():
tensor.add_(torch.ones_like(tensor))
with self._get_state_dict_mgr(fsdp_model, "state_dict", True):
state_dict = deepcopy(_get_state_dict(fsdp_model))
# Initialize a non-wrapped model on all ranks
new_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_BEFORE,
)
_zero_model(new_model, zero_buffers=True)
# Only load the checkpoint on rank 0
if self.rank == 0:
new_model.load_state_dict(state_dict, strict=True)
_assert_module_states(
new_model,
process_group=self.process_group,
assert_fn=self.assertNotEqual,
)
# Broadcast the module states from rank 0 with `sync_module_states=True`
new_fsdp_model = FSDP(
new_model,
device_id=torch.accelerator.current_device_index(),
auto_wrap_policy=auto_wrap_policy,
sync_module_states=True,
)
# Check FSDP models are equal across ranks
with FSDP.summon_full_params(new_fsdp_model):
_assert_module_states(
new_fsdp_model,
process_group=self.process_group,
assert_fn=self.assertEqual,
)
# Check FSDP models correctly loaded the checkpoint
with FSDP.summon_full_params(fsdp_model):
with FSDP.summon_full_params(new_fsdp_model):
params = list(fsdp_model.parameters())
params_new = list(new_fsdp_model.parameters())
self.assertEqual(params, params_new)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("fp16", [True, False])
@parametrize("state_dict_rank0_and_offload", [True, False])
@parametrize("use_orig_params", [True, False])
def test_basic_save_and_load_state_dict(
self,
state_dict_type: str,
cpu_offload: bool,
fp16: bool,
state_dict_rank0_and_offload: bool,
use_orig_params: bool,
):
"""
Tests that we can save a state_dict and load it into a blank model
with various configs such as fp16 and cpu offload and parameters
match as expected.
"""
if (state_dict_rank0_and_offload and state_dict_type != "state_dict") or (
use_orig_params and state_dict_type not in _UNFLATTENED_STATE_DICT_IMPLS
):
return # not supported
device = torch.device(self.rank)
for model_call in [
partial(
self._get_non_fsdp_root_module,
cpu_offload=cpu_offload,
use_orig_params=use_orig_params,
),
partial(
self._get_simple_nested_model,
cpu_offload=cpu_offload,
use_orig_params=use_orig_params,
),
partial(
self._get_simple_model,
cpu_offload=cpu_offload,
use_orig_params=use_orig_params,
),
]:
model = model_call()
if fp16:
model.half()
# Run a forward/backward to compute gradients to test the case
# where there are gradients populated
inp = torch.randn((3, 10), device=device)
if fp16:
inp = inp.half()
model(inp).sum().backward()
ctx = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with ctx:
fsdp_state_dict = _get_state_dict(
model, cpu_offload.offload_params, fp16
)
ignore_keys = [k for k in fsdp_state_dict if NON_ROOT_FSDP_PREFIX in k]
self._validate_state_dict_contents(
model,
fsdp_state_dict,
state_dict_rank0_and_offload,
ignore_keys=ignore_keys,
)
if fp16:
# Verify fp16 is the type
for tensor in fsdp_state_dict.values():
self.assertEqual(tensor.dtype, torch.float16)
model_new = model_call()
if not cpu_offload.offload_params:
model_new = model_new.to(device_type)
if fp16:
model_new.half()
# Run a forward/backward to compute gradients to test the case
# where there are gradients populated
inp = torch.randn((3, 10), device=device)
if fp16:
inp = inp.half()
model_new(inp).sum().backward()
# zero the model to ensure parameters are different.
_zero_model(model_new, zero_buffers=True)
self._compare_models(model, model_new, self.assertNotEqual)
# Verify parameters are the same in the new model.
if state_dict_rank0_and_offload:
fsdp_state_dict = self._broadcast_state_dict(fsdp_state_dict)
with FSDP.state_dict_type(model_new, STATE_DICT_MAPPING[state_dict_type]):
model_new.load_state_dict(fsdp_state_dict, strict=True)
self._compare_models(model, model_new, self.assertEqual, check_fp16=fp16)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("mixed_precision", [True, False])
@parametrize("state_dict_rank0_and_offload", [True, False])
@parametrize("use_orig_params", [True, False])
def test_buffers_save_and_load_state_dict(
self,
state_dict_type: str,
cpu_offload: bool,
mixed_precision: bool,
state_dict_rank0_and_offload: bool,
use_orig_params: bool,
):
"""
Tests that we can save a state_dict and load it for modules with persistent buffers, including
in the context of non-default mixed precision, different ``state_dict_type`` s and CPU offloading.
"""
if (state_dict_rank0_and_offload and state_dict_type != "state_dict") or (
use_orig_params and state_dict_type not in _UNFLATTENED_STATE_DICT_IMPLS
):
return # not supported
mixed_precision = (
MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
if mixed_precision
else None
)
model_call = partial(
self._get_multibuffer_nested_model,
cpu_offload=cpu_offload,
use_orig_params=use_orig_params,
mixed_precision=mixed_precision,
)
model = model_call()
ctx = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with ctx:
fsdp_state_dict = _get_state_dict(model, cpu_offload.offload_params, False)
self._validate_state_dict_contents(
model, fsdp_state_dict, state_dict_rank0_and_offload
)
model_new = model_call()
if not cpu_offload.offload_params:
model_new = model_new.to(device_type)
# zero the model to ensure parameters are different.
_zero_model(model_new, zero_buffers=True)
self._compare_models(model, model_new, self.assertNotEqual)
# Verify parameters are the same in the new model.
if state_dict_rank0_and_offload:
fsdp_state_dict = self._broadcast_state_dict(fsdp_state_dict)
with FSDP.state_dict_type(model_new, STATE_DICT_MAPPING[state_dict_type]):
model_new.load_state_dict(fsdp_state_dict, strict=True)
self._compare_models(model, model_new, self.assertEqual)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize("mixed_precision", [True, False])
@parametrize("state_dict_rank0_and_offload", [True, False])
def test_save_and_load_after_forward_state_dict(
self, state_dict_type, mixed_precision, state_dict_rank0_and_offload
):
"""
Test that saving after some training results in params being updated as
expected.
"""
if state_dict_rank0_and_offload and state_dict_type != "state_dict":
return
torch.accelerator.set_device_index(self.rank)
mixed_precision = (
MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
if mixed_precision
else None
)
model = self._get_simple_nested_model(mixed_precision=mixed_precision)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
initial_params = get_full_params(model)
for _ in range(6):
inp = torch.randn(1, 10, device=torch.accelerator.current_device_index())
output = model(*inp)
loss = output.sum()
expected_dtype = torch.float32 if mixed_precision is None else torch.float16
self.assertEqual(expected_dtype, loss.dtype)
loss.backward()
optim.step()
trained_params = get_full_params(model)
# Ensure some training occurred
self.assertNotEqual(initial_params, trained_params)
# Save a copy of the state_dict
fsd_mgr = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with fsd_mgr:
state_dict = model.state_dict()
if state_dict_type == "state_dict":
state_dict = {k: v.clone() for k, v in state_dict.items()}
else:
for sharded_tensor in state_dict.values():
shard = sharded_tensor._local_shards[0]
shard.tensor = shard.tensor.clone().detach_()
self._validate_state_dict_contents(
model, state_dict, state_dict_rank0_and_offload
)
_zero_model(model)
# Ensure checkpointed params have the full param dtype
for tensor in state_dict.values():
self.assertEqual(tensor.dtype, torch.float32)
# Load state_dict into zeroed model
if state_dict_rank0_and_offload:
state_dict = self._broadcast_state_dict(state_dict)
with FSDP.state_dict_type(model, STATE_DICT_MAPPING[state_dict_type]):
model.load_state_dict(state_dict, strict=True)
loaded_params = get_full_params(model)
self.assertEqual(loaded_params, trained_params)
def _initialize_model(
self,
wrap_fsdp: bool,
wrap_ddp: bool = True,
register_buffers: bool = False,
):
# keep everything deterministic for input data
torch.manual_seed(0)
model = Model(wrap_fsdp, register_buffers=register_buffers).to(device_type)
if wrap_fsdp:
model = FSDP(model)
elif wrap_ddp:
model = DistributedDataParallel(model, device_ids=[self.rank])
return model
@staticmethod
def _state_dict(model: Module, state_dict_type: str):
try:
enum_val = STATE_DICT_MAPPING[state_dict_type]
except KeyError as e:
raise ValueError(f"No state_dict type for {state_dict_type}") from e
with FSDP.state_dict_type(model, enum_val):
return model.state_dict()
@staticmethod
def _load_state_dict(
model: Module, state_dict_type: str, state_dict: dict[str, Any]
):
try:
enum_val = STATE_DICT_MAPPING[state_dict_type]
except KeyError as e:
raise ValueError(f"No state_dict for {state_dict_type}") from e
with FSDP.state_dict_type(model, enum_val):
return model.load_state_dict(state_dict, strict=True)
def _dist_train(
self, wrap_fsdp: bool, state_dict_type: str = "", move_to_cpu: bool = False
):
# TODO: Move this test to common_fsdp.
model = self._initialize_model(wrap_fsdp)
optim = SGD(model.parameters(), lr=0.1)
in_data = torch.rand(
64, 4, requires_grad=True, device=torch.device(device_type)
)
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
if wrap_fsdp:
blank_model = FSDP(Model(True).to(device_type))
_zero_model(blank_model)
state_dict = self._state_dict(model, state_dict_type)
if move_to_cpu:
for key in list(state_dict.keys()):
tensor = state_dict[key]
if isinstance(tensor, torch.Tensor):
state_dict[key] = tensor.cpu()
else:
shards = tensor.local_shards()
if shards:
shards[0].tensor = shards[0].tensor.cpu()
self._load_state_dict(blank_model, state_dict_type, state_dict)
return get_full_params(blank_model)
else:
return list(model.parameters())
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_state_dict_save_load_flow(self, state_dict_type):
self.run_subtests(
{"move_to_cpu": [True, False]},
self._test_state_dict_save_load_flow,
state_dict_type=state_dict_type,
)
def _test_state_dict_save_load_flow(self, state_dict_type, move_to_cpu):
fsdp_params = self._dist_train(
wrap_fsdp=True,
state_dict_type=state_dict_type,
move_to_cpu=move_to_cpu,
)
ddp_params = self._dist_train(wrap_fsdp=False)
self.assertEqual(ddp_params, fsdp_params)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_fsdp_state_dict_keys(self, state_dict_type):
state_dict = self._state_dict(self._initialize_model(True), state_dict_type)
if state_dict_type == "local_state_dict":
self.assertEqual({FLAT_PARAM, f"inner.{FLAT_PARAM}"}, state_dict.keys())
elif state_dict_type in ("state_dict", "sharded_state_dict"):
# Keys should match local model.
local_model = self._initialize_model(wrap_fsdp=False, wrap_ddp=False)
local_keys = local_model.state_dict().keys()
self.assertEqual(state_dict.keys(), local_keys)
else:
raise NotImplementedError(f"No test for {state_dict_type}!")
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _UNFLATTENED_STATE_DICT_IMPLS)
@parametrize("state_dict_rank0_and_offload", [True, False])
@parametrize("fsdp_root", [True, False])
def test_state_dict_load_into_local_module(
self,
state_dict_type,
state_dict_rank0_and_offload,
fsdp_root,
):
"""
Tests that FSDP's state_dict can be loaded into a local model.
"""
if state_dict_rank0_and_offload and state_dict_type != "state_dict":
return
if not fsdp_root:
model = self._get_non_fsdp_root_module()
else:
model = self._initialize_model(wrap_fsdp=True, register_buffers=True)
optim = SGD(model.parameters(), lr=0.1)
if not fsdp_root:
in_data = torch.randn(
1, 10, requires_grad=True, device=torch.device(device_type)
)
else:
in_data = torch.rand(
64, 4, requires_grad=True, device=torch.device(device_type)
)
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
with FSDP.summon_full_params(model):
fsdp_params = deepcopy(list(model.parameters()))
# get FSDP state_dict. Note that by default we return full_state_dict.
sd_mgr = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with sd_mgr:
fsdp_state_dict = model.state_dict()
ignore_keys = [k for k in fsdp_state_dict if NON_ROOT_FSDP_PREFIX in k]
self._validate_state_dict_contents(
model,
fsdp_state_dict,
state_dict_rank0_and_offload,
ignore_keys=ignore_keys,
)
# Create zeroed local model
if not fsdp_root:
blank_local_model = self._get_non_fsdp_root_module(wrap=False)
else:
blank_local_model = self._initialize_model(
wrap_fsdp=False, wrap_ddp=False, register_buffers=True
)
# Nothing should be FSDP
for mod in blank_local_model.modules():
self.assertFalse(isinstance(mod, FSDP))
for param in blank_local_model.parameters():
with torch.no_grad():
param.zero_()
fsdp_state_dict = _gather_state_dict(fsdp_state_dict)
# Load fsdp's full state dict into the local and verify params are as
# expected.
if state_dict_rank0_and_offload:
fsdp_state_dict = self._broadcast_state_dict(fsdp_state_dict)
blank_local_model.load_state_dict(fsdp_state_dict, strict=True)
local_params = list(blank_local_model.parameters())
for fsdp_param, local_param in zip(fsdp_params, local_params):
self.assertEqual(fsdp_param, local_param)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize("double_nest", [True])
def test_state_dict_skip_module(self, state_dict_type, double_nest):
torch.accelerator.set_device_index(self.rank)
def _create_module(wrap_fsdp=True):
LINEAR_SKIP = "linear_skip"
ctx = enable_wrap(wrapper_cls=FSDP) if wrap_fsdp else nullcontext()
with ctx:
module = SkipModel(double_nest=double_nest)
# Full name of linear_skip param tensors in SkipModel, as would be
# stored in checkpoint.
linear_skip_tensor_names = [
k for k in dict(module.named_parameters()) if LINEAR_SKIP in k
]
# skip SkipModule
linear_skip = getattr(module, LINEAR_SKIP)
delattr(module, LINEAR_SKIP)
# Wrap FSDP
fsdp = wrap(module)
# reattach
setattr(module, LINEAR_SKIP, linear_skip)
return fsdp, linear_skip_tensor_names
fsdp, _ = _create_module()
# Run a forward pass
inp = torch.randn((1, 10), device=torch.accelerator.current_device_index())
loss = fsdp(inp)
loss.sum().backward()
with FSDP.state_dict_type(fsdp, STATE_DICT_MAPPING[state_dict_type]):
state_dict = fsdp.state_dict()
if self.rank == 0 and state_dict_type != "local_state_dict":
sd_keys = list(state_dict.keys())
expected = list(SkipModel(double_nest=False).state_dict().keys())
self.assertEqual(sorted(sd_keys), sorted(expected))
# TODO: parameters in linear_skip_tensor_names should not be handled
# by FSDP.state_dict(). Have a check once this is implemented in
# FSDP.state_dict().
# Check that it can be loaded into FSDP.
new_fsdp, _ = _create_module()
_zero_model(new_fsdp)
for p1, p2 in zip(fsdp.parameters(), new_fsdp.parameters()):
self.assertNotEqual(p1, p2)
with FSDP.state_dict_type(new_fsdp, STATE_DICT_MAPPING[state_dict_type]):
if state_dict_type != "local_state_dict":
# FlatParameter has not supported deepcopy yet.
state_dict = deepcopy(state_dict)
new_fsdp.load_state_dict(state_dict, strict=True)
for p1, p2 in zip(fsdp.parameters(), new_fsdp.parameters()):
self.assertEqual(p1, p2)
# Test that the checkpoint can be loaded into a local model.
local, _ = _create_module(wrap_fsdp=False)
for param in local.parameters():
with torch.no_grad():
param.zero_()
with fsdp.summon_full_params(fsdp):
for p1, p2 in zip(fsdp.parameters(), local.parameters()):
self.assertNotEqual(p1, p2)
if state_dict_type == "local_state_dict":
return
state_dict = _gather_state_dict(state_dict)
with fsdp.summon_full_params(fsdp):
if self.rank == 0:
local.load_state_dict(state_dict, strict=True)
for p1, p2 in zip(fsdp.parameters(), local.parameters()):
self.assertEqual(p1, p2)
@skip_if_lt_x_gpu(2)
def test_wrong_state_dict_config(self):
model = FSDP(Model(wrap_fsdp=True).to(device_type))
with self.assertRaisesRegex(RuntimeError, "Expected state_dict_config of type"):
with model.state_dict_type(
model, StateDictType.FULL_STATE_DICT, LocalStateDictConfig()
):
pass
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _UNFLATTENED_STATE_DICT_IMPLS)
@parametrize("prefix", [True, False])
@parametrize("ignore_inner", [True, False])
@parametrize("mixed_precision", [True, False])
def test_state_dict_with_ignored_modules(
self, state_dict_type, prefix, ignore_inner, mixed_precision
):
# Initialize an FSDP-wrapped model with an ignored module that includes
# both parameters and a buffer
model = Model(
wrap_fsdp=True,
register_buffers=True,
ignore_inner=ignore_inner,
mixed_precision=mixed_precision,
).to(device_type)
ignored_modules = [model.outer]
ignored_tensor_to_tensor_name = {
model.outer.bias: "outer.bias",
model.outer.weight: "outer.weight",
}
if ignore_inner:
ignored_tensor_to_tensor_name = {
**ignored_tensor_to_tensor_name,
model.inner.bias: "inner.bias",
model.inner.weight: "inner.weight",
}
# Note that when model.inner is not ignored this test also ensures
# non-ignored buffers are not cloned.
buffer_to_buffer_name = {
model.inner.buffer: "inner.buffer",
model.outer.buffer: "outer.buffer",
}
# expect fp16 model.inner.buffer with mixed_precisions
# expect fp32 sd.inner.buffer after restoring to original precision
# so skip AssertEqual
if mixed_precision and not ignore_inner:
buffer_to_buffer_name.pop(model.inner.buffer)
fsdp_model = FSDP(
model,
ignored_modules=ignored_modules,
mixed_precision=MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
if mixed_precision
else None,
)
prefix_str = "foo." if prefix else ""
with FSDP.state_dict_type(fsdp_model, STATE_DICT_MAPPING[state_dict_type]):
sd1 = _gather_state_dict(fsdp_model.state_dict(prefix=prefix_str))
with FSDP.summon_full_params(fsdp_model):
fsdp_params = deepcopy(list(fsdp_model.parameters()))
# Check that the ignored parameters and all buffers are not cloned
for tensor, tensor_name in {
**ignored_tensor_to_tensor_name,
**buffer_to_buffer_name,
}.items():
prefixed_tensor_name = f"{prefix_str}{tensor_name}"
self.assertTrue(prefixed_tensor_name in sd1)
self.assertEqual(
tensor.data_ptr(),
sd1[prefixed_tensor_name].data_ptr(),
f"{prefixed_tensor_name}",
)
# should not apply mixed_precision to ignored buffers
for buffer_name in buffer_to_buffer_name.values():
prefixed_buffer_name = f"{prefix_str}{buffer_name}"
self.assertTrue(prefixed_buffer_name in sd1)
self.assertEqual(sd1[prefixed_buffer_name].dtype, torch.float32)
# Check that the state dict can be loaded into a non-wrapped version of
# the model
nonwrapped_model = Model(wrap_fsdp=False, register_buffers=True).to(device_type)
for param in nonwrapped_model.parameters():
with torch.no_grad():
param.zero_()
to_load = {k[len(prefix_str) :]: v for k, v in sd1.items()}
nonwrapped_model.load_state_dict(to_load, strict=True)
local_params = list(nonwrapped_model.parameters())
for fsdp_param, local_param in zip(fsdp_params, local_params):
self.assertEqual(fsdp_param, local_param)
# Check that if we save a state dict again, the ignored parameters and
# buffer still have the same data pointer
with FSDP.state_dict_type(fsdp_model, STATE_DICT_MAPPING[state_dict_type]):
sd2 = fsdp_model.state_dict(prefix=prefix_str)
for tensor, tensor_name in {
**ignored_tensor_to_tensor_name,
**buffer_to_buffer_name,
}.items():
prefixed_tensor_name = f"{prefix_str}{tensor_name}"
self.assertTrue(prefixed_tensor_name in sd2)
self.assertEqual(tensor.data_ptr(), sd2[prefixed_tensor_name].data_ptr())
self.assertEqual(
sd1[prefixed_tensor_name].data_ptr(),
sd2[prefixed_tensor_name].data_ptr(),
)
@skip_if_lt_x_gpu(2)
def test_state_dict_type(self):
module = SkipModel(double_nest=True)
with enable_wrap(wrapper_cls=FSDP):
fsdp = wrap(module)
with FSDP.state_dict_type(fsdp, StateDictType.LOCAL_STATE_DICT):
pass
for module in FSDP.fsdp_modules(fsdp):
self.assertEqual(module._state_dict_type, StateDictType.FULL_STATE_DICT)
@skip_if_lt_x_gpu(2)
def test_local_state_dict_with_empty_ranks(self):
class Model(Module):
def __init__(self) -> None:
super().__init__()
self.my_tensor = torch.full((1,), 3.1415926)
self.my_parameter = nn.Parameter(self.my_tensor)
def forward(self, x):
return self.my_parameter
model = FSDP(Model().to(device_type))
with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
out = model(None)
out.backward()
state_dict = deepcopy(model.state_dict())
with torch.no_grad():
with FSDP.summon_full_params(model):
self.assertEqual(model.my_parameter.item(), 3.1415926)
model.my_parameter.copy_(torch.full((1,), 1.75).to(device_type))
self.assertEqual(model.my_parameter.item(), 1.75)
model.load_state_dict(state_dict)
with FSDP.summon_full_params(model):
self.assertEqual(model.my_parameter.item(), 3.1415926)
@skip_if_lt_x_gpu(2)
def test_torch_save_load(self):
model = Model(wrap_fsdp=True).to(device_type)
with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
state_dict = model.state_dict()
checkpoint = io.BytesIO()
torch.save(state_dict, checkpoint)
checkpoint.seek(0)
with torch.serialization.safe_globals(
[
Shard,
ShardMetadata,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
MEM_FORMAT_ENCODING,
_remote_device,
getattr,
ShardedTensor.ProcessGroupState,
ChunkShardingSpec,
]
):
state_dict_saved = torch.load(checkpoint)
for k, v in state_dict_saved.items():
if isinstance(v, ShardedTensor):
self.assertEqual(
v._local_shards[0].tensor, state_dict[k]._local_shards[0].tensor
)
else:
self.assertEqual(v, state_dict[k])
@skip_if_lt_x_gpu(2)
def test_shared_module_and_shared_parameter(self):
model = FSDP(TestDummyModel().to(device_type))
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT):
state_dict = model.state_dict()
self.assertEqual(
state_dict["random_parameter"], state_dict["shared_parameter"]
)
self.assertEqual(state_dict["net2.0.bias"], state_dict["net3.0.bias"])
self.assertEqual(state_dict["net2.0.weight"], state_dict["net3.0.weight"])
@skip_if_lt_x_gpu(2)
def test_full_state_dict_missing_unexpected_keys_cleaned(self):
model = self._get_simple_nested_model()
sd = model.state_dict()
# Create a missing key
sd.pop(next(iter(sd.keys())))
# Create an unexpected key
sd["unexpected"] = torch.ones(1)
missing, unexpected = model.load_state_dict(sd, strict=False)
assert len(missing) == 1
assert len(unexpected) == 1
self.assertTrue(FSDP_PREFIX not in missing[0])
self.assertTrue(FSDP_PREFIX not in unexpected[0])
@skip_if_lt_x_gpu(2)
def test_sharded_load_multi_backend_pg(self):
auto_wrap_policy = ModuleWrapPolicy(
{TransformerEncoderLayer, TransformerDecoderLayer}
)
fsdp_kwargs = {
"auto_wrap_policy": auto_wrap_policy,
"use_orig_params": True,
}
for load_cpu in [True, False]:
with self.subTest(load_cpu=load_cpu):
backend = torch.distributed.get_default_backend_for_device(device_type)
pg = dist.new_group(backend=f"cpu:gloo,{device_type}:{backend}")
fsdp_model = TransformerWithSharedParams.init(
pg,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs,
)
FSDP.set_state_dict_type(fsdp_model, StateDictType.SHARDED_STATE_DICT)
sharded = fsdp_model.state_dict()
param_copy = [t.clone().detach_() for t in fsdp_model.parameters()]
with torch.no_grad():
for p in fsdp_model.parameters():
p.zero_()
if load_cpu:
# Offload to CPU to simulate CPU state_dict load
for k, v in sharded.items():
sharded[k] = v.cpu()
fsdp_model.load_state_dict(sharded)
for p1, p2 in zip(param_copy, fsdp_model.parameters()):
self.assertEqual(p1, p2, f"not equal: {p1.sum()} vs {p2.sum()}")
@skip_if_lt_x_gpu(2)
def test_world_size_one(self):
my_pg = None
for i in range(self.world_size):
pg = dist.new_group(ranks=[i])
if i == self.rank:
my_pg = pg
model = TransformerWithSharedParams.init(
my_pg,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
)
with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
state_dict = model.state_dict()
model.load_state_dict(state_dict)
dist.barrier()
| TestFSDPStateDict |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 536805,
"end": 537122
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("PullRequest", graphql_name="node")
| PullRequestEdge |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.