language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | vyperlang__vyper | vyper/venom/passes/phi_elimination.py | {
"start": 187,
"end": 3531
} | class ____(IRPass):
phi_to_origins: dict[IRInstruction, set[IRInstruction]]
def run_pass(self):
self.dfg = self.analyses_cache.request_analysis(DFGAnalysis)
self.updater = InstUpdater(self.dfg)
self._calculate_phi_origins()
for _, inst in self.dfg.outputs.copy().items():
if inst.opcode != "phi":
continue
self._process_phi(inst)
# sort phis to top of basic block
for bb in self.function.get_basic_blocks():
bb.ensure_well_formed()
self.analyses_cache.invalidate_analysis(LivenessAnalysis)
def _process_phi(self, inst: IRInstruction):
srcs = self.phi_to_origins[inst]
if len(srcs) == 1:
src = srcs.pop()
if src == inst:
return
self.updater.mk_assign(inst, src.output)
def _calculate_phi_origins(self):
self.dfg = self.analyses_cache.request_analysis(DFGAnalysis)
self.phi_to_origins = dict()
for bb in self.function.get_basic_blocks():
for inst in bb.instructions:
if inst.opcode != "phi":
break
self._get_phi_origins(inst)
def _get_phi_origins(self, inst: IRInstruction):
assert inst.opcode == "phi" # sanity
visited: set[IRInstruction] = set()
self.phi_to_origins[inst] = self._get_phi_origins_r(inst, visited)
# traverse chains of phis and stores to get the "root" instructions
# for phis.
def _get_phi_origins_r(
self, inst: IRInstruction, visited: set[IRInstruction]
) -> set[IRInstruction]:
if inst.opcode == "phi":
if inst in self.phi_to_origins:
return self.phi_to_origins[inst]
if inst in visited:
# we have hit a dfg cycle. break the recursion.
# if it is only visited we have found a self
# reference, and we won't find anything more by
# continuing the recursion.
return set()
visited.add(inst)
res: set[IRInstruction] = set()
for _, var in inst.phi_operands:
next_inst = self.dfg.get_producing_instruction(var)
assert next_inst is not None, (inst, var)
res |= self._get_phi_origins_r(next_inst, visited)
if len(res) > 1:
# if this phi has more than one origin, then for future
# phis, it is better to treat this as a barrier in the
# graph traversal. for example (without basic blocks)
# %a = 1
# %b = 2
# %c = phi %a, %b ; has two origins
# %d = %c
# %e = %d
# %f = phi %d, %e
# in this case, %f should reduce to %c.
return set([inst])
return res
if inst.opcode == "assign" and isinstance(inst.operands[0], IRVariable):
# traverse assignment chain
var = inst.operands[0]
next_inst = self.dfg.get_producing_instruction(var)
assert next_inst is not None
return self._get_phi_origins_r(next_inst, visited)
# root of the phi/assignment chain
return set([inst])
| PhiEliminationPass |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/cloud/test_jobs.py | {
"start": 6917,
"end": 17421
} | class ____:
async def test_run_success(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
return_value=Response(200, json={"data": {"id": 10000, "status": 10}})
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/",
headers=HEADERS,
).mock(return_value=Response(200, json={"data": ["manifest.json"]}))
result = await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials, job_id=1
)
assert result == {
"id": 10000,
"status": 10,
"artifact_paths": ["manifest.json"],
}
async def test_run_success_with_wait(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
side_effect=[
Response(200, json={"data": {"id": 10000, "status": 1}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
Response(200, json={"data": {"id": 10000, "status": 10}}),
]
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/",
headers=HEADERS,
).mock(return_value=Response(200, json={"data": ["manifest.json"]}))
result = await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
poll_frequency_seconds=1,
)
assert result == {
"id": 10000,
"status": 10,
"artifact_paths": ["manifest.json"],
}
async def test_run_failure_with_wait_and_retry(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
side_effect=[
Response(200, json={"data": {"id": 10000, "status": 1}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
Response(
200, json={"data": {"id": 10000, "status": 20}}
), # failed status
]
)
with pytest.raises(DbtCloudJobRunFailed):
await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
poll_frequency_seconds=1,
retry_filtered_models_attempts=1,
)
async def test_run_with_unexpected_status(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
side_effect=[
Response(200, json={"data": {"id": 10000, "status": 1}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
Response(
200, json={"data": {"id": 10000, "status": 42}}
), # unknown status
]
)
with pytest.raises(
ValueError, match="42 is not a valid DbtCloudJobRunStatus"
):
await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
poll_frequency_seconds=1,
retry_filtered_models_attempts=0,
)
async def test_run_failure_no_run_id(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(return_value=Response(200, json={"data": {"project_id": 12345}}))
with pytest.raises(RuntimeError, match="Unable to determine run ID"):
await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
poll_frequency_seconds=1,
)
async def test_run_cancelled_with_wait(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
side_effect=[
Response(200, json={"data": {"id": 10000, "status": 1}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
Response(200, json={"data": {"id": 10000, "status": 30}}),
]
)
with pytest.raises(DbtCloudJobRunCancelled):
await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
poll_frequency_seconds=1,
retry_filtered_models_attempts=0,
)
async def test_run_timed_out(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
side_effect=[
Response(200, json={"data": {"id": 10000, "status": 1}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
Response(200, json={"data": {"id": 10000, "status": 3}}),
]
)
with pytest.raises(DbtCloudJobRunTimedOut):
await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
poll_frequency_seconds=1,
max_wait_seconds=3,
retry_filtered_models_attempts=0,
)
async def test_run_success_failed_artifacts(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
return_value=Response(200, json={"data": {"id": 10000, "status": 10}})
)
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/",
headers=HEADERS,
).mock(
return_value=Response(
500, json={"status": {"user_message": "This is what went wrong"}}
)
)
result = await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials, job_id=1
)
assert result == {"id": 10000, "status": 10}
| TestTriggerDbtCloudJobRunAndWaitForCompletion |
python | scikit-learn__scikit-learn | sklearn/cluster/_hdbscan/hdbscan.py | {
"start": 16448,
"end": 41792
} | class ____(ClusterMixin, BaseEstimator):
"""Cluster data using hierarchical density-based clustering.
HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications
with Noise. Performs :class:`~sklearn.cluster.DBSCAN` over varying epsilon
values and integrates the result to find a clustering that gives the best
stability over epsilon.
This allows HDBSCAN to find clusters of varying densities (unlike
:class:`~sklearn.cluster.DBSCAN`), and be more robust to parameter selection.
Read more in the :ref:`User Guide <hdbscan>`.
.. versionadded:: 1.3
Parameters
----------
min_cluster_size : int, default=5
The minimum number of samples in a group for that group to be
considered a cluster; groupings smaller than this size will be left
as noise.
min_samples : int, default=None
The parameter `k` used to calculate the distance between a point
`x_p` and its k-th nearest neighbor.
When `None`, defaults to `min_cluster_size`.
cluster_selection_epsilon : float, default=0.0
A distance threshold. Clusters below this value will be merged.
See [5]_ for more information.
max_cluster_size : int, default=None
A limit to the size of clusters returned by the `"eom"` cluster
selection algorithm. There is no limit when `max_cluster_size=None`.
Has no effect if `cluster_selection_method="leaf"`.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array.
- If metric is a string or callable, it must be one of
the options allowed by :func:`~sklearn.metrics.pairwise_distances`
for its metric parameter.
- If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
metric_params : dict, default=None
Arguments passed to the distance metric.
alpha : float, default=1.0
A distance scaling parameter as used in robust single linkage.
See [3]_ for more information.
algorithm : {"auto", "brute", "kd_tree", "ball_tree"}, default="auto"
Exactly which algorithm to use for computing core distances; By default
this is set to `"auto"` which attempts to use a
:class:`~sklearn.neighbors.KDTree` tree if possible, otherwise it uses
a :class:`~sklearn.neighbors.BallTree` tree. Both `"kd_tree"` and
`"ball_tree"` algorithms use the
:class:`~sklearn.neighbors.NearestNeighbors` estimator.
If the `X` passed during `fit` is sparse or `metric` is invalid for
both :class:`~sklearn.neighbors.KDTree` and
:class:`~sklearn.neighbors.BallTree`, then it resolves to use the
`"brute"` algorithm.
leaf_size : int, default=40
Leaf size for trees responsible for fast nearest neighbour queries when
a KDTree or a BallTree are used as core-distance algorithms. A large
dataset size and small `leaf_size` may induce excessive memory usage.
If you are running out of memory consider increasing the `leaf_size`
parameter. Ignored for `algorithm="brute"`.
n_jobs : int, default=None
Number of jobs to run in parallel to calculate distances.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
cluster_selection_method : {"eom", "leaf"}, default="eom"
The method used to select clusters from the condensed tree. The
standard approach for HDBSCAN* is to use an Excess of Mass (`"eom"`)
algorithm to find the most persistent clusters. Alternatively you can
instead select the clusters at the leaves of the tree -- this provides
the most fine grained and homogeneous clusters.
allow_single_cluster : bool, default=False
By default HDBSCAN* will not produce a single cluster, setting this
to True will override this and allow single cluster results in
the case that you feel this is a valid result for your dataset.
store_centers : str, default=None
Which, if any, cluster centers to compute and store. The options are:
- `None` which does not compute nor store any centers.
- `"centroid"` which calculates the center by taking the weighted
average of their positions. Note that the algorithm uses the
euclidean metric and does not guarantee that the output will be
an observed data point.
- `"medoid"` which calculates the center by taking the point in the
fitted data which minimizes the distance to all other points in
the cluster. This is slower than "centroid" since it requires
computing additional pairwise distances between points of the
same cluster but guarantees the output is an observed data point.
The medoid is also well-defined for arbitrary metrics, and does not
depend on a euclidean metric.
- `"both"` which computes and stores both forms of centers.
copy : bool, default=False
If `copy=True` then any time an in-place modifications would be made
that would overwrite data passed to :term:`fit`, a copy will first be
made, guaranteeing that the original data will be unchanged.
Currently, it only applies when `metric="precomputed"`, when passing
a dense array or a CSR sparse matrix and when `algorithm="brute"`.
.. versionchanged:: 1.10
The default value for `copy` will change from `False` to `True`
in version 1.10.
Attributes
----------
labels_ : ndarray of shape (n_samples,)
Cluster labels for each point in the dataset given to :term:`fit`.
Outliers are labeled as follows:
- Noisy samples are given the label -1.
- Samples with infinite elements (+/- np.inf) are given the label -2.
- Samples with missing data are given the label -3, even if they
also have infinite elements.
probabilities_ : ndarray of shape (n_samples,)
The strength with which each sample is a member of its assigned
cluster.
- Clustered samples have probabilities proportional to the degree that
they persist as part of the cluster.
- Noisy samples have probability zero.
- Samples with infinite elements (+/- np.inf) have probability 0.
- Samples with missing data have probability `np.nan`.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
centroids_ : ndarray of shape (n_clusters, n_features)
A collection containing the centroid of each cluster calculated under
the standard euclidean metric. The centroids may fall "outside" their
respective clusters if the clusters themselves are non-convex.
Note that `n_clusters` only counts non-outlier clusters. That is to
say, the `-1, -2, -3` labels for the outlier clusters are excluded.
medoids_ : ndarray of shape (n_clusters, n_features)
A collection containing the medoid of each cluster calculated under
the whichever metric was passed to the `metric` parameter. The
medoids are points in the original cluster which minimize the average
distance to all other points in that cluster under the chosen metric.
These can be thought of as the result of projecting the `metric`-based
centroid back onto the cluster.
Note that `n_clusters` only counts non-outlier clusters. That is to
say, the `-1, -2, -3` labels for the outlier clusters are excluded.
See Also
--------
DBSCAN : Density-Based Spatial Clustering of Applications
with Noise.
OPTICS : Ordering Points To Identify the Clustering Structure.
Birch : Memory-efficient, online-learning algorithm.
Notes
-----
The `min_samples` parameter includes the point itself, whereas the implementation in
`scikit-learn-contrib/hdbscan <https://github.com/scikit-learn-contrib/hdbscan>`_
does not. To get the same results in both versions, the value of `min_samples` here
must be 1 greater than the value used in `scikit-learn-contrib/hdbscan
<https://github.com/scikit-learn-contrib/hdbscan>`_.
References
----------
.. [1] :doi:`Campello, R. J., Moulavi, D., & Sander, J. Density-based clustering
based on hierarchical density estimates.
<10.1007/978-3-642-37456-2_14>`
.. [2] :doi:`Campello, R. J., Moulavi, D., Zimek, A., & Sander, J.
Hierarchical density estimates for data clustering, visualization,
and outlier detection.<10.1145/2733381>`
.. [3] `Chaudhuri, K., & Dasgupta, S. Rates of convergence for the
cluster tree.
<https://papers.nips.cc/paper/2010/hash/
b534ba68236ba543ae44b22bd110a1d6-Abstract.html>`_
.. [4] `Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and
Sander, J. Density-Based Clustering Validation.
<https://epubs.siam.org/doi/pdf/10.1137/1.9781611973440.96>`_
.. [5] :arxiv:`Malzer, C., & Baum, M. "A Hybrid Approach To Hierarchical
Density-based Cluster Selection."<1911.02282>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import HDBSCAN
>>> from sklearn.datasets import load_digits
>>> X, _ = load_digits(return_X_y=True)
>>> hdb = HDBSCAN(copy=True, min_cluster_size=20)
>>> hdb.fit(X)
HDBSCAN(copy=True, min_cluster_size=20)
>>> hdb.labels_.shape == (X.shape[0],)
True
>>> np.unique(hdb.labels_).tolist()
[-1, 0, 1, 2, 3, 4, 5, 6, 7]
"""
_parameter_constraints = {
"min_cluster_size": [Interval(Integral, left=2, right=None, closed="left")],
"min_samples": [Interval(Integral, left=1, right=None, closed="left"), None],
"cluster_selection_epsilon": [
Interval(Real, left=0, right=None, closed="left")
],
"max_cluster_size": [
None,
Interval(Integral, left=1, right=None, closed="left"),
],
"metric": [
StrOptions(FAST_METRICS | set(_VALID_METRICS) | {"precomputed"}),
callable,
],
"metric_params": [dict, None],
"alpha": [Interval(Real, left=0, right=None, closed="neither")],
"algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
"leaf_size": [Interval(Integral, left=1, right=None, closed="left")],
"n_jobs": [Integral, None],
"cluster_selection_method": [StrOptions({"eom", "leaf"})],
"allow_single_cluster": ["boolean"],
"store_centers": [None, StrOptions({"centroid", "medoid", "both"})],
"copy": ["boolean", Hidden(StrOptions({"warn"}))],
}
def __init__(
self,
min_cluster_size=5,
min_samples=None,
cluster_selection_epsilon=0.0,
max_cluster_size=None,
metric="euclidean",
metric_params=None,
alpha=1.0,
algorithm="auto",
leaf_size=40,
n_jobs=None,
cluster_selection_method="eom",
allow_single_cluster=False,
store_centers=None,
copy="warn",
):
self.min_cluster_size = min_cluster_size
self.min_samples = min_samples
self.alpha = alpha
self.max_cluster_size = max_cluster_size
self.cluster_selection_epsilon = cluster_selection_epsilon
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.n_jobs = n_jobs
self.cluster_selection_method = cluster_selection_method
self.allow_single_cluster = allow_single_cluster
self.store_centers = store_centers
self.copy = copy
@_fit_context(
# HDBSCAN.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Find clusters based on hierarchical density-based clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
ndarray of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
`metric='precomputed'`.
y : None
Ignored.
Returns
-------
self : object
Returns self.
"""
# TODO(1.10): remove "warn" option
# and leave copy to its default value where applicable in examples and doctests.
if self.copy == "warn":
warn(
"The default value of `copy` will change from False to True in 1.10."
" Explicitly set a value for `copy` to silence this warning.",
FutureWarning,
)
_copy = False
else:
_copy = self.copy
if self.metric == "precomputed" and self.store_centers is not None:
raise ValueError(
"Cannot store centers when using a precomputed distance matrix."
)
self._metric_params = self.metric_params or {}
if self.metric != "precomputed":
# Non-precomputed matrices may contain non-finite values.
X = validate_data(
self,
X,
accept_sparse=["csr", "lil"],
ensure_all_finite=False,
dtype=np.float64,
)
self._raw_data = X
all_finite = True
try:
_assert_all_finite(X.data if issparse(X) else X)
except ValueError:
all_finite = False
if not all_finite:
# Pass only the purely finite indices into hdbscan
# We will later assign all non-finite points their
# corresponding labels, as specified in `_OUTLIER_ENCODING`
# Reduce X to make the checks for missing/outlier samples more
# convenient.
reduced_X = X.sum(axis=1)
# Samples with missing data are denoted by the presence of
# `np.nan`
missing_index = np.isnan(reduced_X).nonzero()[0]
# Outlier samples are denoted by the presence of `np.inf`
infinite_index = np.isinf(reduced_X).nonzero()[0]
# Continue with only finite samples
finite_index = _get_finite_row_indices(X)
internal_to_raw = {x: y for x, y in enumerate(finite_index)}
X = X[finite_index]
elif issparse(X):
# Handle sparse precomputed distance matrices separately
X = validate_data(
self,
X,
accept_sparse=["csr", "lil"],
dtype=np.float64,
force_writeable=True,
)
else:
# Only non-sparse, precomputed distance matrices are handled here
# and thereby allowed to contain numpy.inf for missing distances
# Perform data validation after removing infinite values (numpy.inf)
# from the given distance matrix.
X = validate_data(
self, X, ensure_all_finite=False, dtype=np.float64, force_writeable=True
)
if np.isnan(X).any():
# TODO: Support np.nan in Cython implementation for precomputed
# dense HDBSCAN
raise ValueError("np.nan values found in precomputed-dense")
if X.shape[0] == 1:
raise ValueError("n_samples=1 while HDBSCAN requires more than one sample")
self._min_samples = (
self.min_cluster_size if self.min_samples is None else self.min_samples
)
if self._min_samples > X.shape[0]:
raise ValueError(
f"min_samples ({self._min_samples}) must be at most the number of"
f" samples in X ({X.shape[0]})"
)
mst_func = None
kwargs = dict(
X=X,
min_samples=self._min_samples,
alpha=self.alpha,
metric=self.metric,
n_jobs=self.n_jobs,
**self._metric_params,
)
if self.algorithm == "kd_tree" and self.metric not in KDTree.valid_metrics:
raise ValueError(
f"{self.metric} is not a valid metric for a KDTree-based algorithm."
" Please select a different metric."
)
elif (
self.algorithm == "ball_tree" and self.metric not in BallTree.valid_metrics
):
raise ValueError(
f"{self.metric} is not a valid metric for a BallTree-based algorithm."
" Please select a different metric."
)
if self.algorithm != "auto":
if (
self.metric != "precomputed"
and issparse(X)
and self.algorithm != "brute"
):
raise ValueError("Sparse data matrices only support algorithm `brute`.")
if self.algorithm == "brute":
mst_func = _hdbscan_brute
kwargs["copy"] = _copy
elif self.algorithm == "kd_tree":
mst_func = _hdbscan_prims
kwargs["algo"] = "kd_tree"
kwargs["leaf_size"] = self.leaf_size
else:
mst_func = _hdbscan_prims
kwargs["algo"] = "ball_tree"
kwargs["leaf_size"] = self.leaf_size
else:
if issparse(X) or self.metric not in FAST_METRICS:
# We can't do much with sparse matrices ...
mst_func = _hdbscan_brute
kwargs["copy"] = _copy
elif self.metric in KDTree.valid_metrics:
# TODO: Benchmark KD vs Ball Tree efficiency
mst_func = _hdbscan_prims
kwargs["algo"] = "kd_tree"
kwargs["leaf_size"] = self.leaf_size
else:
# Metric is a valid BallTree metric
mst_func = _hdbscan_prims
kwargs["algo"] = "ball_tree"
kwargs["leaf_size"] = self.leaf_size
self._single_linkage_tree_ = mst_func(**kwargs)
self.labels_, self.probabilities_ = tree_to_labels(
self._single_linkage_tree_,
self.min_cluster_size,
self.cluster_selection_method,
self.allow_single_cluster,
self.cluster_selection_epsilon,
self.max_cluster_size,
)
if self.metric != "precomputed" and not all_finite:
# Remap indices to align with original data in the case of
# non-finite entries. Samples with np.inf are mapped to -1 and
# those with np.nan are mapped to -2.
self._single_linkage_tree_ = remap_single_linkage_tree(
self._single_linkage_tree_,
internal_to_raw,
# There may be overlap for points w/ both `np.inf` and `np.nan`
non_finite=set(np.hstack([infinite_index, missing_index])),
)
new_labels = np.empty(self._raw_data.shape[0], dtype=np.int32)
new_labels[finite_index] = self.labels_
new_labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
new_labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
self.labels_ = new_labels
new_probabilities = np.zeros(self._raw_data.shape[0], dtype=np.float64)
new_probabilities[finite_index] = self.probabilities_
# Infinite outliers have probability 0 by convention, though this
# is arbitrary.
new_probabilities[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"]
new_probabilities[missing_index] = _OUTLIER_ENCODING["missing"]["prob"]
self.probabilities_ = new_probabilities
if self.store_centers:
self._weighted_cluster_center(X)
return self
def fit_predict(self, X, y=None):
"""Cluster X and return the associated cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
ndarray of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
`metric='precomputed'`.
y : None
Ignored.
Returns
-------
y : ndarray of shape (n_samples,)
Cluster labels.
"""
self.fit(X)
return self.labels_
def _weighted_cluster_center(self, X):
"""Calculate and store the centroids/medoids of each cluster.
This requires `X` to be a raw feature array, not precomputed
distances. Rather than return outputs directly, this helper method
instead stores them in the `self.{centroids, medoids}_` attributes.
The choice for which attributes are calculated and stored is mediated
by the value of `self.store_centers`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The feature array that the estimator was fit with.
"""
# Number of non-noise clusters
n_clusters = len(set(self.labels_) - {-1, -2})
mask = np.empty((X.shape[0],), dtype=np.bool_)
make_centroids = self.store_centers in ("centroid", "both")
make_medoids = self.store_centers in ("medoid", "both")
if make_centroids:
self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
if make_medoids:
self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
# Need to handle iteratively seen each cluster may have a different
# number of samples, hence we can't create a homogeneous 3D array.
for idx in range(n_clusters):
mask = self.labels_ == idx
data = X[mask]
strength = self.probabilities_[mask]
if make_centroids:
self.centroids_[idx] = np.average(data, weights=strength, axis=0)
if make_medoids:
# TODO: Implement weighted argmin PWD backend
dist_mat = pairwise_distances(
data, metric=self.metric, **self._metric_params
)
dist_mat = dist_mat * strength
medoid_index = np.argmin(dist_mat.sum(axis=1))
self.medoids_[idx] = data[medoid_index]
return
def dbscan_clustering(self, cut_distance, min_cluster_size=5):
"""Return clustering given by DBSCAN without border points.
Return clustering that would be equivalent to running DBSCAN* for a
particular cut_distance (or epsilon) DBSCAN* can be thought of as
DBSCAN without the border points. As such these results may differ
slightly from `cluster.DBSCAN` due to the difference in implementation
over the non-core points.
This can also be thought of as a flat clustering derived from constant
height cut through the single linkage tree.
This represents the result of selecting a cut value for robust single linkage
clustering. The `min_cluster_size` allows the flat clustering to declare noise
points (and cluster smaller than `min_cluster_size`).
Parameters
----------
cut_distance : float
The mutual reachability distance cut value to use to generate a
flat clustering.
min_cluster_size : int, default=5
Clusters smaller than this value with be called 'noise' and remain
unclustered in the resulting flat clustering.
Returns
-------
labels : ndarray of shape (n_samples,)
An array of cluster labels, one per datapoint.
Outliers are labeled as follows:
- Noisy samples are given the label -1.
- Samples with infinite elements (+/- np.inf) are given the label -2.
- Samples with missing data are given the label -3, even if they
also have infinite elements.
"""
labels = labelling_at_cut(
self._single_linkage_tree_, cut_distance, min_cluster_size
)
# Infer indices from labels generated during `fit`
infinite_index = self.labels_ == _OUTLIER_ENCODING["infinite"]["label"]
missing_index = self.labels_ == _OUTLIER_ENCODING["missing"]["label"]
# Overwrite infinite/missing outlier samples (otherwise simple noise)
labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
return labels
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.input_tags.allow_nan = self.metric != "precomputed"
return tags
| HDBSCAN |
python | keras-team__keras | keras/src/layers/normalization/rms_normalization.py | {
"start": 162,
"end": 3008
} | class ____(Layer):
"""Root Mean Square (RMS) Normalization layer.
This layer normalizes the input tensor based on its RMS value.
The Keras layer performs the operation as described in
[Root Mean Square Layer Normalization](https://arxiv.org/pdf/1910.07467)
by Biao Zhang et al.
If `scale` is enabled, the layer will scale the normalized outputs via
a learnable scaling factor.
So, with scaling enabled, the normalization equations
are as follows:
Let the intermediate activations for a mini-batch to be the `inputs`.
```python
rms_normalization(x) = x * rsqrt(mean(square(x))) * scale
```
For example:
>>> layer = keras.layers.RMSNormalization()
>>> layer.build([5, 20, 30, 10])
>>> print(layer.scale.shape)
(10,)
>>> layer(np.random.rand(1, 10)).numpy()
array([[0.35098287, 1.0495652 , 1.4645109 , 1.2944688 , 0.31124955,
1.2768592 , 1.184331 , 0.17474432, 0.49955517, 1.2428929 ]],
dtype=float32)
Args:
axis: int. The axis on which to perform the normalization.
epsilon: float. A small number to add to avoid division by zero.
"""
def __init__(self, axis=-1, epsilon=1e-6, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.epsilon = epsilon
def build(self, input_shape):
if isinstance(self.axis, list):
shape = tuple([input_shape[dim] for dim in self.axis])
else:
shape = (input_shape[self.axis],)
self.axis = [self.axis]
self.scale = self.add_weight(
name="scale", shape=shape, initializer="ones"
)
self.built = True
def call(self, x):
"""Applies RMS normalization to the input tensor.
Args:
x: Input tensor of shape (batch_size, input_dim).
Returns:
The RMS-normalized tensor of the same shape (batch_size, input_dim),
scaled by the learned `scale` parameter.
"""
return ops.rms_normalization(
x, scale=self.scale, axis=self.axis, epsilon=self.epsilon
)
def compute_output_shape(self, input_shape):
if isinstance(self.axis, int):
axes = [self.axis]
else:
axes = self.axis
for axis in axes:
if axis >= len(input_shape) or axis < -len(input_shape):
raise ValueError(
f"Axis {axis} is out of bounds for "
f"input shape {input_shape}. "
f"Received: axis={self.axis}"
)
return input_shape
def get_config(self):
config = {
"axis": self.axis,
"epsilon": self.epsilon,
}
base_config = super().get_config()
return {**base_config, **config}
| RMSNormalization |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 217614,
"end": 220319
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3]", L_y_: "f32[3, 3]", L_v_: "f32[3, 3]"):
l_x_ = L_x_
l_y_ = L_y_
l_v_ = L_v_
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
aux: "f32[3, 3]" = torch._make_dual(l_x_, l_v_, level = 0); l_x_ = None
_maybe_load_decompositions_1 = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions_1 = None
_make_dual_1: "f32[3, 3]" = torch._make_dual(l_y_, l_v_, level = 0); l_y_ = l_v_ = None
sin: "f32[3, 3]" = aux.sin()
sum_1: "f32[]" = sin.sum(); sin = None
cos: "f32[3, 3]" = _make_dual_1.cos(); _make_dual_1 = None
result_duals: "f32[3, 3]" = sum_1 + cos; sum_1 = cos = None
aux_1: "f32[3, 3]" = torch._C._functorch._unwrap_for_grad(aux, 1); aux = None
_unpack_dual = torch._unpack_dual(result_duals, level = 0); result_duals = None
primal: "f32[3, 3]" = _unpack_dual[0]
dual: "f32[3, 3]" = _unpack_dual[1]; _unpack_dual = None
primals_out_unflatten: "f32[3, 3]" = torch._C._functorch._unwrap_for_grad(primal, 1); primal = None
tangents_out_unflatten: "f32[3, 3]" = torch._C._functorch._unwrap_for_grad(dual, 1); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
return (primals_out_unflatten, tangents_out_unflatten, aux_1)
""",
)
def test_jvp_two_tensors_disable_grad(self):
counters.clear()
def fn(x):
return x.sin().sum()
def wrapper_fn(x, v):
with torch.autograd.forward_ad._set_fwd_grad_enabled(False):
return torch.func.jvp(fn, (x,), (v,))
x = torch.randn(3, 3)
v = torch.randn(3, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x, v))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | oauthlib__oauthlib | tests/openid/connect/core/test_tokens.py | {
"start": 123,
"end": 6288
} | class ____(TestCase):
def test_create_token_callable_expires_in(self):
"""
Test retrieval of the expires in value by calling the callable expires_in property
"""
expires_in_mock = mock.MagicMock()
request_mock = mock.MagicMock()
token = JWTToken(expires_in=expires_in_mock, request_validator=mock.MagicMock())
token.create_token(request=request_mock)
expires_in_mock.assert_called_once_with(request_mock)
def test_create_token_non_callable_expires_in(self):
"""
When a non callable expires in is set this should just be set to the request
"""
expires_in_mock = mock.NonCallableMagicMock()
request_mock = mock.MagicMock()
token = JWTToken(expires_in=expires_in_mock, request_validator=mock.MagicMock())
token.create_token(request=request_mock)
self.assertFalse(expires_in_mock.called)
self.assertEqual(request_mock.expires_in, expires_in_mock)
def test_create_token_calls_get_id_token(self):
"""
When create_token is called the call should be forwarded to the get_id_token on the token validator
"""
request_mock = mock.MagicMock()
with mock.patch('oauthlib.openid.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator = RequestValidatorMock()
token = JWTToken(expires_in=mock.MagicMock(), request_validator=request_validator)
token.create_token(request=request_mock)
request_validator.get_jwt_bearer_token.assert_called_once_with(None, None, request_mock)
def test_validate_request_token_from_headers(self):
"""
Bearer token get retrieved from headers.
"""
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
mock.patch('oauthlib.openid.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator_mock = RequestValidatorMock()
token = JWTToken(request_validator=request_validator_mock)
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.scopes = mock.MagicMock()
request.headers = {
'Authorization': 'Bearer some-token-from-header'
}
token.validate_request(request=request)
request_validator_mock.validate_jwt_bearer_token.assert_called_once_with('some-token-from-header',
request.scopes,
request)
def test_validate_request_token_from_headers_basic(self):
"""
Wrong kind of token (Basic) retrieved from headers. Confirm token is not parsed.
"""
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
mock.patch('oauthlib.openid.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator_mock = RequestValidatorMock()
token = JWTToken(request_validator=request_validator_mock)
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.scopes = mock.MagicMock()
request.headers = {
'Authorization': 'Basic some-token-from-header'
}
token.validate_request(request=request)
request_validator_mock.validate_jwt_bearer_token.assert_called_once_with(None,
request.scopes,
request)
def test_validate_token_from_request(self):
"""
Token get retrieved from request object.
"""
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
mock.patch('oauthlib.openid.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator_mock = RequestValidatorMock()
token = JWTToken(request_validator=request_validator_mock)
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.scopes = mock.MagicMock()
request.access_token = 'some-token-from-request-object'
request.headers = {}
token.validate_request(request=request)
request_validator_mock.validate_jwt_bearer_token.assert_called_once_with('some-token-from-request-object',
request.scopes,
request)
def test_estimate_type(self):
"""
Estimate type results for a jwt token
"""
def test_token(token, expected_result):
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock:
jwt_token = JWTToken()
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.headers = {
'Authorization': 'Bearer {}'.format(token)
}
result = jwt_token.estimate_type(request=request)
self.assertEqual(result, expected_result)
test_items = (
('eyfoo.foo.foo', 10),
('eyfoo.foo.foo.foo.foo', 10),
('eyfoobar', 0)
)
for token, expected_result in test_items:
test_token(token, expected_result)
| JWTTokenTestCase |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_mixed_precision.py | {
"start": 20721,
"end": 40839
} | class ____(TestFSDPMixedPrecision):
@property
def world_size(self):
return 2
def _get_subtest_config(self) -> dict[str, list[Any]]:
"""Returns a subtest configuration that subtests prefetching settings
together."""
return {
"forward_prefetch": [False, True],
"backward_prefetch": [
None,
BackwardPrefetch.BACKWARD_PRE,
BackwardPrefetch.BACKWARD_POST,
],
}
@skip_if_lt_x_gpu(2)
def test_mixed_precision_no_reshard_after_forward(self):
# Note that we don't exercise all possible different configs so as to
# not increase test TTS too much.
mp = default_mp if not nccl_supports_bf16 else mp_diff_buffer_and_reduce
self._run_test_mixed_precision_e2e(
mp_config=mp,
cpu_offload=CPUOffload(offload_params=True),
backward_prefetch=None,
forward_prefetch=False,
full_precision_param_dtype=torch.float64,
sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
enable_sharded_grad_scaler=False,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_mixed_precision_e2e_full_shard(
self,
mp_config,
cpu_offload,
full_precision_param_dtype,
enable_sharded_grad_scaler,
):
self.run_subtests(
self._get_subtest_config(),
self._run_test_mixed_precision_e2e,
mp_config=mp_config,
cpu_offload=cpu_offload,
full_precision_param_dtype=full_precision_param_dtype,
sharding_strategy=ShardingStrategy.FULL_SHARD,
enable_sharded_grad_scaler=enable_sharded_grad_scaler,
)
def _test_mixed_precision_embedding_table(self, mp_config):
# Basic test to ensure int inputs are not casted which would break
# modules such as embedding tables.
param_dtype = mp_config.param_dtype or torch.float32
orig_reduce_scatter = dist.reduce_scatter_tensor
test_reduce_scatter = partial(
self._reduce_scatter_validate_mp,
orig_reduce_scatter,
mp_config,
True,
)
with patch_reduce_scatter(test_reduce_scatter, param_dtype):
# TODO: `test_mp_embedding_reduce()` fails if we do not wrap the
# entire `TransformerWithSharedParams` with a single top-level FSDP
model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_BEFORE,
{"mixed_precision": mp_config},
)
fsdp_model = FSDP(model, mixed_precision=mp_config)
optim = torch.optim.SGD(fsdp_model.parameters(), lr=0.1)
for _ in range(6):
inp = fsdp_model.module.get_input(torch.device("cuda"))
# This would fail if we casted integer module inputs such as for
# embedding tables.
output = fsdp_model(*inp)
loss = fsdp_model.module.get_loss(inp, output).cuda()
self.assertEqual(loss.dtype, param_dtype)
fsdp_model.module.run_backward(loss)
optim.step()
@skip_if_lt_x_gpu(2)
def test_mp_embedding_reduce(self):
self._test_mixed_precision_embedding_table(
mp_config=MixedPrecision(reduce_dtype=torch.float16)
)
@skip_if_lt_x_gpu(2)
def test_mp_embedding_only_params_and_bufs(self):
self._test_mixed_precision_embedding_table(
mp_config=MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.float16,
)
)
@skip_if_lt_x_gpu(2)
def test_mp_embedding_default(self):
default_mp_config = MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.float16,
reduce_dtype=torch.float16,
)
self._test_mixed_precision_embedding_table(mp_config=default_mp_config)
@skip_if_lt_x_gpu(2)
def test_mp_embedding_params_and_reduce_diff(self):
params_and_reduce_different = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float32,
buffer_dtype=torch.float16,
)
self._test_mixed_precision_embedding_table(
mp_config=params_and_reduce_different
)
@skip_if_lt_x_gpu(2)
@skipIfNoTorchVision
def test_mixed_precision_resnet(self):
"""
End to end test to ensure mixed precision + auto_wrap works
for ResNet model.
"""
resnet_model = torchvision.models.resnet50().cuda()
resnet_model = nn.SyncBatchNorm.convert_sync_batchnorm(
resnet_model, process_group=dist.distributed_c10d._get_default_group()
)
n_bn = sum(
1 if isinstance(x, _BatchNorm) else 0 for x in resnet_model.modules()
)
inp = torch.ones(1, 3, 1000, 1000, device="cuda")
mp_config = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
fsdp = FSDP(
resnet_model,
auto_wrap_policy=size_based_auto_wrap_policy,
mixed_precision=mp_config,
)
# Batchnorm units should be wrapped individually. Validate this by
# ensuring there are equal no. of FSDP units that are BN as BN units
# in original resnet model.
fsdp_bn = 0
for module in fsdp.fsdp_modules(fsdp):
wrapped_module = module.module
if isinstance(wrapped_module, _BatchNorm):
fsdp_bn += 1
self.assertEqual(fsdp_bn, n_bn)
# Would throw type mismatch issue without mixed precision autowrapping.
loss = fsdp(inp).sum()
loss.backward()
@skip_if_lt_x_gpu(2)
def test_grads_reduced_precision(self):
self.run_subtests(
{
"offload_params": [False, True],
"use_orig_params": [False, True],
},
self._test_grads_reduced_precision,
)
@skip_if_lt_x_gpu(2)
@parametrize("convert_sync_bn", [True, False])
def test_mp_batchnorm(self, convert_sync_bn):
class BatchNormNet(nn.Module):
def __init__(self, affine=True):
super().__init__()
self.fc1 = nn.Linear(2, 40, bias=False)
self.bn = nn.BatchNorm1d(4, affine=affine)
self.fc2 = nn.Linear(40, 4, bias=False)
self.ln = nn.LayerNorm(4)
self.fc3 = nn.Linear(4, 4, bias=False)
def forward(self, x):
x = torch.reshape(self.fc1(x), (-1, 4, 10))
x = self.bn(x)
x = torch.reshape(x, (-1, 40))
x = self.fc2(x)
x = self.ln(x)
x = self.fc3(x)
return F.softmax(x, dim=1)
def never_wrap_policy(*args, **kwargs):
return False
net = BatchNormNet().cuda()
if convert_sync_bn:
net = nn.SyncBatchNorm.convert_sync_batchnorm(net)
# FSDP detects that mixed precision + batchnorm will cause issues
# and thus wrap batchnorm in a distinct FSDP unit that does not
# use mixed precision.
mp_config = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
_module_classes_to_ignore=[_BatchNorm, nn.LayerNorm],
)
with self.assertWarnsRegex(
expected_warning=UserWarning,
expected_regex="These modules will be wrapped as separate FSDP",
):
model = FSDP(
net,
mixed_precision=mp_config,
auto_wrap_policy=never_wrap_policy,
)
no_mp = MixedPrecision()
for mod in [model.ln, model.bn]:
self.assertTrue(isinstance(mod, FSDP))
self.assertEqual(no_mp, mod.mixed_precision)
# policy should not have wrapped any other submodules
for mod in [model.fc1, model.fc2, model.fc3]:
self.assertFalse(isinstance(mod, FSDP))
# Overall mixed precision is still enabled
self.assertEqual(mp_config, model.mixed_precision)
inp = torch.randn((1, 2), device="cuda")
# Without FSDP BN mixed precision fix, this would result in
# RuntimeError: Expected counts to have type Half but got Float
# for syncBN
model(inp).sum().backward()
@skip_if_lt_x_gpu(2)
def test_eval_root_cast_inputs(self):
"""
In a case where root module does not manage FSDP parameters,
ensure that we don't cast forward inputs which could potentially
cause a dtype mismatch. Check that FSDP_USE_FULL_PREC_IN_EVAL controls
this.
"""
low_prec_dtype = torch.float16
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = nn.Linear(5, 5)
def forward(self, x, expect_use_full_prec_in_eval):
if expect_use_full_prec_in_eval:
assert x.dtype == torch.float32, f"Expected fp32, got {x.dtype}"
else:
assert x.dtype == low_prec_dtype, (
f"Expected {low_prec_dtype}, got {x.dtype}"
)
return self.a(x)
mp_config = MixedPrecision(
param_dtype=low_prec_dtype,
reduce_dtype=low_prec_dtype,
buffer_dtype=low_prec_dtype,
)
for use_full_prec_in_eval in [True, False]:
os.environ["FSDP_USE_FULL_PREC_IN_EVAL"] = (
"1" if use_full_prec_in_eval else "0"
)
m = MyModel().cuda()
m.a = FSDP(m.a, mixed_precision=mp_config)
model = FSDP(m, mixed_precision=mp_config)
model.eval()
inp = torch.randn(5, 5)
model(inp, use_full_prec_in_eval).sum().backward()
@skip_if_lt_x_gpu(2)
def test_full_precision_in_eval(self):
"""
Tests that eval runs in full precision if FSDP_USE_FULL_PREC_IN_EVAL is set.
"""
for (
cast_forward_inputs,
use_full_prec_in_eval,
) in itertools.product([True, False], [True, False]):
mp_config = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
cast_forward_inputs=cast_forward_inputs,
)
os.environ["FSDP_USE_FULL_PREC_IN_EVAL"] = (
"1" if use_full_prec_in_eval else "0"
)
model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
{"mixed_precision": mp_config},
)
inp = model.get_input(torch.device("cuda"))
output = model(*inp)
loss = model.get_loss(inp, output).cuda()
# Loss should be in fp16
self.assertEqual(torch.float16, loss.dtype)
model.run_backward(loss)
# Grads should be in fp32 as we upcast them
for p in model.parameters():
if p.grad is not None:
self.assertEqual(torch.float32, p.grad.dtype)
# Now in eval mode, loss should be fp32 if use_full_prec_in_eval is set.
model.eval()
inp = model.get_input(torch.device("cuda"))
output = model(*inp)
loss = model.get_loss(inp, output).cuda()
expected_dtype = torch.float32 if use_full_prec_in_eval else torch.float16
self.assertEqual(expected_dtype, loss.dtype)
@skip_if_lt_x_gpu(2)
def test_full_precision_in_eval_buffers(self):
"""
Tests that when model.eval() and FSDP_USE_FULL_PREC_IN_EVAL is set,
buffers are in the full precision.
"""
for (
cast_forward_inputs,
use_full_prec_in_eval,
) in itertools.product([True, False], [True, False]):
os.environ["FSDP_USE_FULL_PREC_IN_EVAL"] = (
"1" if use_full_prec_in_eval else "0"
)
mp_config = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
cast_forward_inputs=cast_forward_inputs,
)
model_getter = self._get_simple_nested_model
fsdp_model = model_getter(
param_dtype=torch.float32,
run_checks=False,
mixed_precision=mp_config,
)
inp = torch.randn(3, 10, device="cuda")
fsdp_model((inp, self, fsdp_model, mp_config, torch.float32))
for buf in fsdp_model.buffers():
self.assertEqual(torch.float16, buf.dtype)
# model.eval() + forward pass should make the buffers in full prec again
# Add pre-forward hooks
def verify_eval_buffer_dtype(module, input):
expected_dtype = (
_BUFFER_ORIG_DTYPE if use_full_prec_in_eval else torch.float16
)
for buf in module.buffers():
self.assertEqual(expected_dtype, buf.dtype)
def _get_underlying_module(m):
return m.module if isinstance(m, FSDP) else m
hook_handles = []
hook_handles.append(
_get_underlying_module(fsdp_model[0]).register_forward_pre_hook(
verify_eval_buffer_dtype
)
)
hook_handles.append(
_get_underlying_module(fsdp_model[1]).register_forward_pre_hook(
verify_eval_buffer_dtype
)
)
fsdp_model.eval()
fsdp_model((inp, self, fsdp_model, mp_config, torch.float32))
for hook_handle in hook_handles:
hook_handle.remove()
expected_dtype = (
_BUFFER_ORIG_DTYPE if use_full_prec_in_eval else torch.float16
)
for buf in fsdp_model.buffers():
self.assertEqual(expected_dtype, buf.dtype)
# model.train() + forward again should make buffers in fp16
fsdp_model.train()
fsdp_model((inp, self, fsdp_model, mp_config, torch.float32))
for buf in fsdp_model.buffers():
self.assertEqual(torch.float16, buf.dtype)
@skip_if_lt_x_gpu(2)
def test_full_precision_in_eval_comm(self):
for (
cast_forward_inputs,
use_full_prec_in_eval,
) in itertools.product([True, False], [True, False]):
os.environ["FSDP_USE_FULL_PREC_IN_EVAL"] = (
"1" if use_full_prec_in_eval else "0"
)
mp_config = MixedPrecision(
param_dtype=torch.float32,
reduce_dtype=torch.float16,
buffer_dtype=torch.float32,
cast_forward_inputs=cast_forward_inputs,
# cast reduction for batchnorm also just in this test, to make
# validation easier.
_module_classes_to_ignore=[],
)
model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
{"mixed_precision": mp_config},
)
# Patch reduce_scatter to add validation for mixed precision types.
orig_reduce_scatter = dist.reduce_scatter_tensor
test_reduce_scatter = partial(
self._reduce_scatter_validate_mp,
orig_reduce_scatter,
mp_config,
not use_full_prec_in_eval,
)
model.eval()
with patch_reduce_scatter(test_reduce_scatter, torch.float32):
inp = model.get_input(torch.device("cuda"))
output = model(*inp)
loss = model.get_loss(inp, output).cuda()
model.run_backward(loss)
@skip_if_lt_x_gpu(2)
def test_input_grads_with_param_mixed_precision(self):
"""
Tests that input tensors that require gradients do get their gradients
even after being cast to a low precision (when parameter mixed
precision is enabled).
"""
self.run_subtests(
{
"sharding_strategy": [
ShardingStrategy.FULL_SHARD,
ShardingStrategy.SHARD_GRAD_OP,
ShardingStrategy.NO_SHARD,
],
"use_orig_params": [False, True],
},
self._test_input_grads_with_param_mixed_precision,
)
def _test_input_grads_with_param_mixed_precision(
self,
sharding_strategy: ShardingStrategy,
use_orig_params: bool,
):
model = nn.Linear(1024, 1024, bias=False)
mixed_precision = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float32,
buffer_dtype=torch.float32,
)
fsdp_model = FSDP(
model,
sharding_strategy=sharding_strategy,
mixed_precision=mixed_precision,
device_id=torch.cuda.current_device(),
use_orig_params=use_orig_params,
)
# Use an input with dtype not equal to the mixed precision
# `param_dtype` so that it gets cast
x_float = torch.randn(
(32, 1024),
device="cuda",
dtype=torch.float32,
requires_grad=True,
)
fsdp_model(x_float).sum().backward()
self.assertTrue(x_float.grad is not None)
# Check that `x_float` preserves its dtype, meaning that the gradient
# propagated via `ToCopyBackward0`
self.assertEqual(x_float.grad.dtype, torch.float32)
@skip_if_lt_x_gpu(2)
def test_buffer_dtype_no_root_handle(self):
class NonLearnableConv(nn.Module):
def __init__(self, kernel, in_channels: int):
super().__init__()
self.padding = (len(kernel) - 1) // 2
kernel = torch.tensor(kernel, dtype=torch.float32)
kernel = kernel / kernel.sum()
kernel = kernel.outer(kernel)[None, None].repeat(in_channels, 1, 1, 1)
self.register_buffer("kernel", kernel)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return nn.functional.conv2d(
x,
self.kernel,
groups=self.kernel.shape[0],
stride=2,
padding=self.padding,
)
model = nn.Sequential(
nn.Sequential(nn.Conv2d(3, 64, 3, padding=1)),
nn.Sequential(NonLearnableConv((1, 2, 2, 1), 64)),
nn.Sequential(nn.Conv2d(64, 3, 3, padding=1)),
nn.Sequential(NonLearnableConv((1, 2, 2, 1), 3)),
).cuda()
dtype = torch.float16
model = FSDP(
module=model,
device_id=self.rank,
use_orig_params=True,
limit_all_gathers=True,
auto_wrap_policy=ModuleWrapPolicy({nn.Sequential}),
mixed_precision=MixedPrecision(
param_dtype=dtype,
buffer_dtype=dtype,
reduce_dtype=dtype,
),
)
# Check that we can run forward/backward without dtype errors
x = torch.randn(2, 3, 128, 128, device="cuda")
out = model(x)
out.mean().backward()
| TestFSDPMixedPrecisionSharded |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 113732,
"end": 114712
} | class ____:
xlSmartTagControlActiveX = 13 # from enum XlSmartTagControlType
xlSmartTagControlButton = 6 # from enum XlSmartTagControlType
xlSmartTagControlCheckbox = 9 # from enum XlSmartTagControlType
xlSmartTagControlCombo = 12 # from enum XlSmartTagControlType
xlSmartTagControlHelp = 3 # from enum XlSmartTagControlType
xlSmartTagControlHelpURL = 4 # from enum XlSmartTagControlType
xlSmartTagControlImage = 8 # from enum XlSmartTagControlType
xlSmartTagControlLabel = 7 # from enum XlSmartTagControlType
xlSmartTagControlLink = 2 # from enum XlSmartTagControlType
xlSmartTagControlListbox = 11 # from enum XlSmartTagControlType
xlSmartTagControlRadioGroup = 14 # from enum XlSmartTagControlType
xlSmartTagControlSeparator = 5 # from enum XlSmartTagControlType
xlSmartTagControlSmartTag = 1 # from enum XlSmartTagControlType
xlSmartTagControlTextbox = 10 # from enum XlSmartTagControlType
| SmartTagControlType |
python | paramiko__paramiko | paramiko/agent.py | {
"start": 7798,
"end": 9002
} | class ____:
"""
Class proxying request as a client:
#. client ask for a request_forward_agent()
#. server creates a proxy and a fake SSH Agent
#. server ask for establishing a connection when needed,
calling the forward_agent_handler at client side.
#. the forward_agent_handler launch a thread for connecting
the remote fake agent and the local agent
#. Communication occurs ...
"""
def __init__(self, chanRemote):
self._conn = None
self.__chanR = chanRemote
self.thread = AgentRemoteProxy(self, chanRemote)
self.thread.start()
def __del__(self):
self.close()
def connect(self):
"""
Method automatically called by ``AgentProxyThread.run``.
"""
conn = get_agent_connection()
if not conn:
return
self._conn = conn
def close(self):
"""
Close the current connection and terminate the agent
Should be called manually
"""
if hasattr(self, "thread"):
self.thread._exit = True
self.thread.join(1000)
if self._conn is not None:
self._conn.close()
| AgentClientProxy |
python | django__django | tests/constraints/models.py | {
"start": 982,
"end": 1665
} | class ____(models.Model):
name = models.CharField(max_length=255, null=True)
price = models.IntegerField(null=True)
discounted_price = models.IntegerField(null=True)
rebate = models.GeneratedField(
expression=Coalesce("price", 0)
- Coalesce("discounted_price", Coalesce("price", 0)),
output_field=models.IntegerField(),
db_persist=True,
)
lower_name = models.GeneratedField(
expression=Lower(models.F("name")),
output_field=models.CharField(max_length=255, null=True),
db_persist=True,
)
class Meta:
required_db_features = {"supports_stored_generated_columns"}
| GeneratedFieldStoredProduct |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/components.py | {
"start": 4412,
"end": 5743
} | class ____(Decoder):
"""
Decoder strategy that attempts to decompress a response using GZIP first and then parses the resulting
document as JSON. Also as a backup, this works for uncompressed responses that are already in JSON format
"""
parameters: InitVar[Mapping[str, Any]]
def is_stream_response(self) -> bool:
return False
def decode(self, response: requests.Response) -> Generator[MutableMapping[str, Any], None, None]:
try:
document = gzip.decompress(response.content).decode("iso-8859-1")
except gzip.BadGzipFile:
document = response.content.decode("iso-8859-1")
try:
body_json = json.loads(document)
yield from self.parse_body_json(body_json)
except requests.exceptions.JSONDecodeError:
logger.warning(f"Response cannot be parsed into json: {response.status_code=}, {response.text=}")
yield {}
@staticmethod
def parse_body_json(
body_json: MutableMapping[str, Any] | List[MutableMapping[str, Any]],
) -> Generator[MutableMapping[str, Any], None, None]:
if not isinstance(body_json, list):
body_json = [body_json]
if len(body_json) == 0:
yield {}
else:
yield from body_json
@dataclass
| GzipJsonDecoder |
python | GoogleCloudPlatform__python-docs-samples | compute/client_library/ingredients/instances/custom_machine_types/helper_class.py | {
"start": 1019,
"end": 8317
} | class ____:
"""
Allows to create custom machine types to be used with the VM instances.
"""
@unique
class CPUSeries(Enum):
N1 = "custom"
N2 = "n2-custom"
N2D = "n2d-custom"
E2 = "e2-custom"
E2_MICRO = "e2-custom-micro"
E2_SMALL = "e2-custom-small"
E2_MEDIUM = "e2-custom-medium"
TypeLimits = namedtuple(
"TypeLimits",
[
"allowed_cores",
"min_mem_per_core",
"max_mem_per_core",
"allow_extra_memory",
"extra_memory_limit",
],
)
# The limits for various CPU types are described on:
# https://cloud.google.com/compute/docs/general-purpose-machines
LIMITS = {
CPUSeries.E2: TypeLimits(frozenset(range(2, 33, 2)), 512, 8192, False, 0),
CPUSeries.E2_MICRO: TypeLimits(frozenset(), 1024, 2048, False, 0),
CPUSeries.E2_SMALL: TypeLimits(frozenset(), 2048, 4096, False, 0),
CPUSeries.E2_MEDIUM: TypeLimits(frozenset(), 4096, 8192, False, 0),
CPUSeries.N2: TypeLimits(
frozenset(range(2, 33, 2)).union(set(range(36, 129, 4))),
512,
8192,
True,
gb_to_mb(624),
),
CPUSeries.N2D: TypeLimits(
frozenset({2, 4, 8, 16, 32, 48, 64, 80, 96}), 512, 8192, True, gb_to_mb(768)
),
CPUSeries.N1: TypeLimits(
frozenset({1}.union(range(2, 97, 2))), 922, 6656, True, gb_to_mb(624)
),
}
def __init__(
self, zone: str, cpu_series: CPUSeries, memory_mb: int, core_count: int = 0
):
self.zone = zone
self.cpu_series = cpu_series
self.limits = self.LIMITS[self.cpu_series]
# Shared machine types (e2-small, e2-medium and e2-micro) always have
# 2 vCPUs: https://cloud.google.com/compute/docs/general-purpose-machines#e2_limitations
self.core_count = 2 if self.is_shared() else core_count
self.memory_mb = memory_mb
self._checked = False
self._check_parameters()
self.extra_memory_used = self._check_extra_memory()
def is_shared(self):
return self.cpu_series in (
CustomMachineType.CPUSeries.E2_SMALL,
CustomMachineType.CPUSeries.E2_MICRO,
CustomMachineType.CPUSeries.E2_MEDIUM,
)
def _check_extra_memory(self) -> bool:
if self._checked:
return self.memory_mb > self.core_count * self.limits.max_mem_per_core
else:
raise RuntimeError(
"You need to call _check_parameters() before calling _check_extra_memory()"
)
def _check_parameters(self):
"""
Check whether the requested parameters are allowed. Find more information about limitations of custom machine
types at: https://cloud.google.com/compute/docs/general-purpose-machines#custom_machine_types
"""
# Check the number of cores
if (
self.limits.allowed_cores
and self.core_count not in self.limits.allowed_cores
):
raise RuntimeError(
f"Invalid number of cores requested. Allowed number of cores for {self.cpu_series.name} is: {sorted(self.limits.allowed_cores)}"
)
# Memory must be a multiple of 256 MB
if self.memory_mb % 256 != 0:
raise RuntimeError("Requested memory must be a multiple of 256 MB.")
# Check if the requested memory isn't too little
if self.memory_mb < self.core_count * self.limits.min_mem_per_core:
raise RuntimeError(
f"Requested memory is too low. Minimal memory for {self.cpu_series.name} is {self.limits.min_mem_per_core} MB per core."
)
# Check if the requested memory isn't too much
if self.memory_mb > self.core_count * self.limits.max_mem_per_core:
if self.limits.allow_extra_memory:
if self.memory_mb > self.limits.extra_memory_limit:
raise RuntimeError(
f"Requested memory is too large.. Maximum memory allowed for {self.cpu_series.name} is {self.limits.extra_memory_limit} MB."
)
else:
raise RuntimeError(
f"Requested memory is too large.. Maximum memory allowed for {self.cpu_series.name} is {self.limits.max_mem_per_core} MB per core."
)
self._checked = True
def __str__(self) -> str:
"""
Return the custom machine type in form of a string acceptable by Compute Engine API.
"""
if self.cpu_series in {
self.CPUSeries.E2_SMALL,
self.CPUSeries.E2_MICRO,
self.CPUSeries.E2_MEDIUM,
}:
return f"zones/{self.zone}/machineTypes/{self.cpu_series.value}-{self.memory_mb}"
if self.extra_memory_used:
return f"zones/{self.zone}/machineTypes/{self.cpu_series.value}-{self.core_count}-{self.memory_mb}-ext"
return f"zones/{self.zone}/machineTypes/{self.cpu_series.value}-{self.core_count}-{self.memory_mb}"
def short_type_str(self) -> str:
"""
Return machine type in a format without the zone. For example, n2-custom-0-10240.
This format is used to create instance templates.
"""
return str(self).rsplit("/", maxsplit=1)[1]
@classmethod
def from_str(cls, machine_type: str):
"""
Construct a new object from a string. The string needs to be a valid custom machine type like:
- https://www.googleapis.com/compute/v1/projects/diregapic-mestiv/zones/us-central1-b/machineTypes/e2-custom-4-8192
- zones/us-central1-b/machineTypes/e2-custom-4-8192
- e2-custom-4-8192 (in this case, the zone parameter will not be set)
"""
zone = None
if machine_type.startswith("http"):
machine_type = machine_type[machine_type.find("zones/") :]
if machine_type.startswith("zones/"):
_, zone, _, machine_type = machine_type.split("/")
extra_mem = machine_type.endswith("-ext")
if machine_type.startswith("custom"):
cpu = cls.CPUSeries.N1
_, cores, memory = machine_type.rsplit("-", maxsplit=2)
else:
if extra_mem:
cpu_series, _, cores, memory, _ = machine_type.split("-")
else:
cpu_series, _, cores, memory = machine_type.split("-")
if cpu_series == "n2":
cpu = cls.CPUSeries.N2
elif cpu_series == "n2d":
cpu = cls.CPUSeries.N2D
elif cpu_series == "e2":
cpu = cls.CPUSeries.E2
if cores == "micro":
cpu = cls.CPUSeries.E2_MICRO
cores = 2
elif cores == "small":
cpu = cls.CPUSeries.E2_SMALL
cores = 2
elif cores == "medium":
cpu = cls.CPUSeries.E2_MEDIUM
cores = 2
else:
raise RuntimeError("Unknown CPU series.")
cores = int(cores)
memory = int(memory)
return cls(zone, cpu, memory, cores)
# </INGREDIENT>
| CustomMachineType |
python | openai__openai-python | src/openai/types/chat/completion_create_params.py | {
"start": 14442,
"end": 15341
} | class ____(TypedDict, total=False):
name: Required[str]
"""The name of the function to be called.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
of 64.
"""
description: str
"""
A description of what the function does, used by the model to choose when and
how to call the function.
"""
parameters: FunctionParameters
"""The parameters the functions accepts, described as a JSON Schema object.
See the [guide](https://platform.openai.com/docs/guides/function-calling) for
examples, and the
[JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
documentation about the format.
Omitting `parameters` defines a function with an empty parameter list.
"""
ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
| Function |
python | getsentry__sentry-python | sentry_sdk/integrations/pyramid.py | {
"start": 1491,
"end": 5583
} | class ____(Integration):
identifier = "pyramid"
origin = f"auto.http.{identifier}"
transaction_style = ""
def __init__(self, transaction_style="route_name"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
from pyramid import router
old_call_view = router._call_view
@functools.wraps(old_call_view)
def sentry_patched_call_view(registry, request, *args, **kwargs):
# type: (Any, Request, *Any, **Any) -> Response
integration = sentry_sdk.get_client().get_integration(PyramidIntegration)
if integration is None:
return old_call_view(registry, request, *args, **kwargs)
_set_transaction_name_and_source(
sentry_sdk.get_current_scope(), integration.transaction_style, request
)
scope = sentry_sdk.get_isolation_scope()
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
return old_call_view(registry, request, *args, **kwargs)
router._call_view = sentry_patched_call_view
if hasattr(Request, "invoke_exception_view"):
old_invoke_exception_view = Request.invoke_exception_view
def sentry_patched_invoke_exception_view(self, *args, **kwargs):
# type: (Request, *Any, **Any) -> Any
rv = old_invoke_exception_view(self, *args, **kwargs)
if (
self.exc_info
and all(self.exc_info)
and rv.status_int == 500
and sentry_sdk.get_client().get_integration(PyramidIntegration)
is not None
):
_capture_exception(self.exc_info)
return rv
Request.invoke_exception_view = sentry_patched_invoke_exception_view
old_wsgi_call = router.Router.__call__
@ensure_integration_enabled(PyramidIntegration, old_wsgi_call)
def sentry_patched_wsgi_call(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
def sentry_patched_inner_wsgi_call(environ, start_response):
# type: (Dict[str, Any], Callable[..., Any]) -> Any
try:
return old_wsgi_call(self, environ, start_response)
except Exception:
einfo = sys.exc_info()
_capture_exception(einfo)
reraise(*einfo)
middleware = SentryWsgiMiddleware(
sentry_patched_inner_wsgi_call,
span_origin=PyramidIntegration.origin,
)
return middleware(environ, start_response)
router.Router.__call__ = sentry_patched_wsgi_call
@ensure_integration_enabled(PyramidIntegration)
def _capture_exception(exc_info):
# type: (ExcInfo) -> None
if exc_info[0] is None or issubclass(exc_info[0], HTTPException):
return
event, hint = event_from_exception(
exc_info,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "pyramid", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (sentry_sdk.Scope, str, Request) -> None
try:
name_for_style = {
"route_name": request.matched_route.name,
"route_pattern": request.matched_route.pattern,
}
scope.set_transaction_name(
name_for_style[transaction_style],
source=SOURCE_FOR_STYLE[transaction_style],
)
except Exception:
pass
| PyramidIntegration |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 22295,
"end": 22413
} | class ____(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
| BasicModelSearchIndex |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 3222,
"end": 3258
} | class ____(Fine):
...
| FineSubclass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_requests/base_request_builder.py | {
"start": 766,
"end": 1875
} | class ____(AmazonAdsRequestBuilder):
def __init__(self, resource: str) -> None:
self._resource: str = resource
self._client_access_token: str = None
self._client_id: str = None
self._profile_id: str = None
@property
def url(self) -> str:
return f"{BASE_URL}/{self._resource}"
@property
def headers(self):
return (super().headers or {}) | {
"Amazon-Advertising-API-ClientId": self._client_id,
"Amazon-Advertising-API-Scope": self._profile_id,
"Authorization": f"Bearer {self._client_access_token}",
}
def with_client_access_token(self, client_access_token: str) -> "AmazonAdsBaseRequestBuilder":
self._client_access_token: str = client_access_token
return self
def with_client_id(self, client_id: str) -> "AmazonAdsBaseRequestBuilder":
self._client_id: str = client_id
return self
def with_profile_id(self, profile_id: str) -> "AmazonAdsBaseRequestBuilder":
self._profile_id: str = str(profile_id)
return self
| AmazonAdsBaseRequestBuilder |
python | sphinx-doc__sphinx | sphinx/pycode/parser.py | {
"start": 21666,
"end": 23095
} | class ____:
"""Python source code parser to pick up variable comments.
This is a better wrapper for ``VariableCommentPicker``.
"""
def __init__(self, code: str, encoding: str = 'utf-8') -> None:
self.code = filter_whitespace(code)
self.encoding = encoding
self.annotations: dict[tuple[str, str], str] = {}
self.comments: dict[tuple[str, str], str] = {}
self.deforders: dict[str, int] = {}
self.definitions: dict[str, tuple[str, int, int]] = {}
self.finals: list[str] = []
self.overloads: dict[str, list[Signature]] = {}
def parse(self) -> None:
"""Parse the source code."""
self.parse_comments()
self.parse_definition()
def parse_comments(self) -> None:
"""Parse the code and pick up comments."""
tree = ast.parse(self.code, type_comments=True)
picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)
picker.visit(tree)
self.annotations = picker.annotations
self.comments = picker.comments
self.deforders = picker.deforders
self.finals = picker.finals
self.overloads = picker.overloads
def parse_definition(self) -> None:
"""Parse the location of definitions from the code."""
parser = DefinitionFinder(self.code.splitlines(True))
parser.parse()
self.definitions = parser.definitions
| Parser |
python | pyca__cryptography | tests/hazmat/primitives/test_hash_vectors.py | {
"start": 5520,
"end": 6558
} | class ____:
test_shake128 = generate_hash_test(
load_hash_vectors,
os.path.join("hashes", "SHAKE"),
["SHAKE128LongMsg.rsp", "SHAKE128ShortMsg.rsp"],
hashes.SHAKE128(digest_size=16),
)
def test_shake128_variable(self, backend, subtests):
vectors = _load_all_params(
os.path.join("hashes", "SHAKE"),
["SHAKE128VariableOut.rsp"],
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
output_length = int(vector["outputlen"]) // 8
msg = binascii.unhexlify(vector["msg"])
shake = hashes.SHAKE128(digest_size=output_length)
m = hashes.Hash(shake, backend=backend)
m.update(msg)
assert m.finalize() == binascii.unhexlify(vector["output"])
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(
hashes.SHAKE256(digest_size=32)
),
skip_message="Does not support SHAKE256",
)
| TestSHAKE128 |
python | numpy__numpy | numpy/distutils/fcompiler/mips.py | {
"start": 120,
"end": 1714
} | class ____(FCompiler):
compiler_type = 'mips'
description = 'MIPSpro Fortran Compiler'
version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)'
executables = {
'version_cmd' : ["<F90>", "-version"],
'compiler_f77' : ["f77", "-f77"],
'compiler_fix' : ["f90", "-fixedform"],
'compiler_f90' : ["f90"],
'linker_so' : ["f90", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : None
}
module_dir_switch = None #XXX: fix me
module_include_switch = None #XXX: fix me
pic_flags = ['-KPIC']
def get_flags(self):
return self.pic_flags + ['-n32']
def get_flags_opt(self):
return ['-O3']
def get_flags_arch(self):
opt = []
for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split():
if getattr(cpu, 'is_IP%s'%a)():
opt.append('-TARG:platform=IP%s' % a)
break
return opt
def get_flags_arch_f77(self):
r = None
if cpu.is_r10000(): r = 10000
elif cpu.is_r12000(): r = 12000
elif cpu.is_r8000(): r = 8000
elif cpu.is_r5000(): r = 5000
elif cpu.is_r4000(): r = 4000
if r is not None:
return ['r%s' % (r)]
return []
def get_flags_arch_f90(self):
r = self.get_flags_arch_f77()
if r:
r[0] = '-' + r[0]
return r
if __name__ == '__main__':
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='mips').get_version())
| MIPSFCompiler |
python | apache__airflow | providers/qdrant/tests/integration/qdrant/hooks/test_qdrant.py | {
"start": 1002,
"end": 2507
} | class ____:
def setup_method(self):
self.test_collection_name = "test-hook-collection"
self.test_collection_dimension = random.randint(100, 2000)
self.hook = QdrantHook()
self.hook.conn.recreate_collection(
self.test_collection_name,
vectors_config=models.VectorParams(
size=self.test_collection_dimension, distance=models.Distance.MANHATTAN
),
)
def test_connection(self):
response, message = self.hook.verify_connection()
assert response
assert message == "Connection established!", "Successfully connected to Qdrant."
def test_upsert_points(self):
vectors = np.random.rand(100, self.test_collection_dimension)
self.hook.conn.upsert(
self.test_collection_name,
points=[
models.PointStruct(
id=idx, vector=vector.tolist(), payload={"color": "red", "rand_number": idx % 10}
)
for idx, vector in enumerate(vectors)
],
)
assert self.hook.conn.count(self.test_collection_name).count == 100
def test_delete_points(self):
self.hook.conn.delete(
self.test_collection_name,
points_selector=models.Filter(
must=[models.FieldCondition(key="color", match=models.MatchValue(value="red"))]
),
)
assert self.hook.conn.count(self.test_collection_name).count == 0
| TestQdrant |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 58124,
"end": 61294
} | class ____(nn.Module):
"""Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""
def __init__(self, config, num_attention_heads=None):
super().__init__()
if config.hidden_size % num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({num_attention_heads})"
)
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(config.hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_dropout)
def forward(
self,
queries: torch.Tensor,
keys: torch.Tensor,
values: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = queries.shape
query_layer = (
self.query(queries)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(keys).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
)
value_layer = (
self.value(values).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in GroundingDinoModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
context_layer = self.out_proj(context_layer)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
| GroundingDinoMultiheadAttention |
python | tornadoweb__tornado | tornado/test/simple_httpclient_test.py | {
"start": 3895,
"end": 4049
} | class ____(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
@abstract_base_test
| RespondInPrepareHandler |
python | pyparsing__pyparsing | examples/inv_regex.py | {
"start": 2378,
"end": 7970
} | class ____:
def __init__(self, lit):
self.lit = lit
def __str__(self):
return "Lit:" + self.lit
def __repr__(self):
return "Lit:" + self.lit
def make_generator(self):
def lit_gen():
yield self.lit
return lit_gen
def handle_range(toks):
return CharacterRangeEmitter(srange(toks[0]))
def handle_repetition(toks):
toks = toks[0]
if toks[1] in "*+":
raise ParseFatalException("", 0, "unbounded repetition operators not supported")
if toks[1] == "?":
return OptionalEmitter(toks[0])
if "count" in toks:
return GroupEmitter([toks[0]] * int(toks.count))
if "minCount" in toks:
mincount = int(toks.minCount)
maxcount = int(toks.maxCount)
optcount = maxcount - mincount
if optcount:
opt = OptionalEmitter(toks[0])
for i in range(1, optcount):
opt = OptionalEmitter(GroupEmitter([toks[0], opt]))
return GroupEmitter([toks[0]] * mincount + [opt])
else:
return [toks[0]] * mincount
def handle_literal(toks):
lit = ""
for t in toks:
if t[0] == "\\":
if t[1] == "t":
lit += "\t"
else:
lit += t[1]
else:
lit += t
return LiteralEmitter(lit)
def handle_macro(toks):
macro_char = toks[0][1]
if macro_char == "d":
return CharacterRangeEmitter("0123456789")
elif macro_char == "w":
return CharacterRangeEmitter(srange("[A-Za-z0-9_]"))
elif macro_char == "s":
return LiteralEmitter(" ")
else:
raise ParseFatalException(
"", 0, "unsupported macro character (" + macro_char + ")"
)
def handle_sequence(toks):
return GroupEmitter(toks[0])
def handle_dot():
return CharacterRangeEmitter(printables)
def handle_alternative(toks):
return AlternativeEmitter(toks[0])
_parser = None
def parser():
global _parser
if _parser is None:
ParserElement.set_default_whitespace_chars("")
lbrack, rbrack, lbrace, rbrace, lparen, rparen, colon, qmark = Literal.using_each(
"[]{}():?"
)
re_macro = Combine("\\" + one_of("d w s"))
escaped_char = ~re_macro + Combine("\\" + one_of(list(printables)))
re_literal_char = (
"".join(c for c in printables if c not in r"\[]{}().*?+|") + " \t"
)
re_range = Combine(lbrack + SkipTo(rbrack, ignore=escaped_char) + rbrack) # type: ignore
re_literal = escaped_char | one_of(list(re_literal_char))
re_non_capture_group = Suppress("?:")
re_dot = Literal(".")
repetition = (
(lbrace + Word(nums)("count") + rbrace)
| (lbrace + Word(nums)("minCount") + "," + Word(nums)("maxCount") + rbrace)
| one_of(list("*+?"))
)
re_range.add_parse_action(handle_range)
re_literal.add_parse_action(handle_literal)
re_macro.add_parse_action(handle_macro)
re_dot.add_parse_action(handle_dot)
re_term = re_literal | re_range | re_macro | re_dot | re_non_capture_group
re_expr = infix_notation(
re_term,
[
(repetition, 1, OpAssoc.LEFT, handle_repetition),
(Empty(), 2, OpAssoc.LEFT, handle_sequence),
(Suppress("|"), 2, OpAssoc.LEFT, handle_alternative),
],
)
_parser = re_expr
return _parser
def count(gen):
"""Simple function to count the number of elements returned by a generator."""
return sum(1 for _ in gen)
def invert(regex):
r"""
Call this routine as a generator to return all the strings that
match the input regular expression.
for s in invert(r"[A-Z]{3}\d{3}"):
print s
"""
invre = GroupEmitter(parser().parse_string(regex)).make_generator()
return invre()
def main():
tests = r"""
[A-EA]
[A-D]*
[A-D]{3}
X[A-C]{3}Y
X[A-C]{3}\(
X\d
foobar\d\d
foobar{2}
foobar{2,9}
fooba[rz]{2}
(foobar){2}
([01]\d)|(2[0-5])
(?:[01]\d)|(2[0-5])
([01]\d\d)|(2[0-4]\d)|(25[0-5])
[A-C]{1,2}
[A-C]{0,3}
[A-C]\s[A-C]\s[A-C]
[A-C]\s?[A-C][A-C]
[A-C]\s([A-C][A-C])
[A-C]\s([A-C][A-C])?
[A-C]{2}\d{2}
@|TH[12]
@(@|TH[12])?
@(@|TH[12]|AL[12]|SP[123]|TB(1[0-9]?|20?|[3-9]))?
@(@|TH[12]|AL[12]|SP[123]|TB(1[0-9]?|20?|[3-9])|OH(1[0-9]?|2[0-9]?|30?|[4-9]))?
(([ECMP]|HA|AK)[SD]|HS)T
[A-CV]{2}
A[cglmrstu]|B[aehikr]?|C[adeflmorsu]?|D[bsy]|E[rsu]|F[emr]?|G[ade]|H[efgos]?|I[nr]?|Kr?|L[airu]|M[dgnot]|N[abdeiop]?|Os?|P[abdmortu]?|R[abefghnu]|S[bcegimnr]?|T[abcehilm]|Uu[bhopqst]|U|V|W|Xe|Yb?|Z[nr]
(a|b)|(x|y)
(a|b) (x|y)
[ABCDEFG](?:#|##|b|bb)?(?:maj|min|m|sus|aug|dim)?[0-9]?(?:/[ABCDEFG](?:#|##|b|bb)?)?
(Fri|Mon|S(atur|un)|T(hur|ue)s|Wednes)day
A(pril|ugust)|((Dec|Nov|Sept)em|Octo)ber|(Febr|Jan)uary|Ju(ly|ne)|Ma(rch|y)
""".splitlines()
for t in tests:
t = t.strip()
if not t:
continue
print("-" * 50)
print(t)
try:
num = count(invert(t))
print(num)
maxprint = 30
for s in invert(t):
print(s)
maxprint -= 1
if not maxprint:
break
except ParseFatalException as pfe:
print(pfe.msg)
print("")
continue
print("")
if __name__ == "__main__":
main()
| LiteralEmitter |
python | ray-project__ray | release/tune_tests/fault_tolerance_tests/workloads/terminate_node_aws.py | {
"start": 2085,
"end": 4415
} | class ____:
def __init__(
self,
probability: float = 0.1,
time_between_checks_s: float = 60,
warmup_time_s: float = 0,
) -> None:
self.probability = probability
self.time_between_checks_s = time_between_checks_s
self.warmup_time_s = warmup_time_s
self.last_fail_check = None
self.history = []
logging.basicConfig(level=logging.INFO)
self.start_killing()
def start_killing(self):
time.sleep(self.warmup_time_s)
while True:
if random.random() < self.probability:
self.kill()
time.sleep(self.time_between_checks_s)
def kill(self):
failures = 0
max_failures = 3
node = None
terminated_successfully = False
while not terminated_successfully and failures < max_failures:
try:
node = get_random_node()
if not node:
logger.info("No alive worker nodes")
continue
terminate_node(node["NodeID"])
terminated_successfully = True
logger.info(
f"Killed node {node['NodeID']} with IP {node['NodeManagerAddress']}"
)
except Exception:
failures += 1
logger.exception(
"Killing random node failed in attempt "
f"{failures}. "
f"Retrying {max_failures - failures} more times"
)
self.history.append(
{
"timestamp": time.time(),
"node": node,
"terminated_successfully": terminated_successfully,
}
)
# safe_write_to_results_json(self.history)
def create_instance_killer(
probability: float = 0.1,
time_between_checks_s: float = 60,
warmup_time_s: float = 0,
):
killer_actor_cls = InstanceKillerActor.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(
ray.get_runtime_context().get_node_id(), soft=False
),
)
actor = killer_actor_cls.remote(
probability=probability,
time_between_checks_s=time_between_checks_s,
warmup_time_s=warmup_time_s,
)
return actor
| InstanceKillerActor |
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 3888,
"end": 3942
} | class ____:
def method(self):
print()
| Parent |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-zenguard/llama_index/packs/zenguard/base.py | {
"start": 148,
"end": 497
} | class ____(BaseLlamaPack):
def __init__(self, config: ZenGuardConfig):
self._zenguard = ZenGuard(config)
def get_modules(self) -> Dict[str, Any]:
return {"zenguard": self._zenguard}
def run(self, prompt: str, detectors: List[Detector]) -> Dict[str, Any]:
return self._zenguard.detect(detectors, prompt)
| ZenGuardPack |
python | squidfunk__mkdocs-material | material/plugins/group/plugin.py | {
"start": 1628,
"end": 7109
} | class ____(BasePlugin[GroupConfig]):
supports_multiple_instances = True
# Initialize plugin
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize object attributes
self.is_serve = False
self.is_dirty = False
# Determine whether we're serving the site
def on_startup(self, *, command, dirty):
self.is_serve = command == "serve"
self.is_dirty = dirty
# If the group is enabled, conditionally load plugins - at first, this might
# sound easier than it actually is, as we need to jump through some hoops to
# ensure correct ordering among plugins. We're effectively initializing the
# plugins that are part of the group after all MkDocs finished initializing
# all other plugins, so we need to patch the order of the methods. Moreover,
# we must use MkDocs existing plugin collection, or we might have collisions
# with other plugins that are not part of the group. As so often, this is a
# little hacky, but has huge potential making plugin configuration easier.
# There's one little caveat: the `__init__` and `on_startup` methods of the
# plugins that are part of the group are called after all other plugins, so
# the `event_priority` decorator for `on_startup` methods is effectively
# useless. However, the `on_startup` method is only intended to set up the
# plugin and doesn't receive anything else than the invoked command and
# whether we're running a dirty build, so there should be no problems.
@event_priority(150)
def on_config(self, config):
if not self.config.enabled:
return
# Retrieve plugin collection from configuration
option: Plugins = dict(config._schema)["plugins"]
assert isinstance(option, Plugins)
# Load all plugins in group
self.plugins: dict[str, BasePlugin] = {}
try:
for name, plugin in self._load(option):
self.plugins[name] = plugin
# The plugin could not be loaded, likely because it's not installed or
# misconfigured, so we raise a plugin error for a nicer error message
except Exception as e:
raise PluginError(str(e))
# Patch order of plugin methods
for events in option.plugins.events.values():
self._patch(events, config)
# Invoke `on_startup` event for plugins in group
command = "serve" if self.is_serve else "build"
for method in option.plugins.events["startup"]:
plugin = self._get_plugin(method)
# Ensure that we have a method bound to a plugin (and not a hook)
if plugin and plugin in self.plugins.values():
method(command = command, dirty = self.is_dirty)
# -------------------------------------------------------------------------
# Retrieve plugin instance for bound method or nothing
def _get_plugin(self, method: Callable):
return getattr(method, "__self__", None)
# Retrieve priority of plugin method
def _get_priority(self, method: Callable):
return getattr(method, "mkdocs_priority", 0)
# Retrieve position of plugin
def _get_position(self, plugin: BasePlugin, config: MkDocsConfig) -> int:
for at, (_, candidate) in enumerate(config.plugins.items()):
if plugin == candidate:
return at
# -------------------------------------------------------------------------
# Load plugins that are part of the group
def _load(self, option: Plugins):
for name, data in option._parse_configs(self.config.plugins):
yield option.load_plugin_with_namespace(name, data)
# -------------------------------------------------------------------------
# Patch order of plugin methods - all other plugin methods are already in
# the right order, so we only need to check those that are part of the group
# and bubble them up into the right location. Some plugin methods may define
# priorities, so we need to make sure to order correctly within those.
def _patch(self, methods: list[Callable], config: MkDocsConfig):
position = self._get_position(self, config)
for at in reversed(range(1, len(methods))):
tail = methods[at - 1]
head = methods[at]
# Skip if the plugin is not part of the group
plugin = self._get_plugin(head)
if not plugin or plugin not in self.plugins.values():
continue
# Skip if the previous method has a higher priority than the current
# one, because we know we can't swap them anyway
if self._get_priority(tail) > self._get_priority(head):
continue
# Ensure that we have a method bound to a plugin (and not a hook)
plugin = self._get_plugin(tail)
if not plugin:
continue
# Both methods have the same priority, so we check if the ordering
# of both methods is violated, and if it is, swap them
if (position < self._get_position(plugin, config)):
methods[at], methods[at - 1] = tail, head
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.group")
| GroupPlugin |
python | viewflow__viewflow | tests/components/test_field_password.py | {
"start": 1022,
"end": 1666
} | class ____(forms.Form):
field = forms.CharField(widget=forms.PasswordInput)
urlpatterns = [
path(
"",
Site(
viewsets=[
Application(
title="Test Application",
urlpatterns=[
path(
"form/",
FormView.as_view(
form_class=TestForm,
template_name="tests/components.html",
),
)
],
),
]
).urls,
)
]
| TestForm |
python | python-openxml__python-docx | src/docx/styles/style.py | {
"start": 7957,
"end": 8051
} | class ____(BaseStyle):
"""A numbering style.
Not yet implemented.
"""
| _NumberingStyle |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 12339,
"end": 12439
} | class ____(OpcodeWithArg):
_FLAGS = HAS_JABS | HAS_ARGUMENT | NO_NEXT
__slots__ = ()
| JUMP_ABSOLUTE |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 21177,
"end": 21236
} | class ____(DeepseekV3NaiveMoe):
pass
| Glm4vMoeTextNaiveMoe |
python | django__django | tests/forms_tests/widget_tests/test_colorinput.py | {
"start": 68,
"end": 307
} | class ____(WidgetTest):
widget = ColorInput()
def test_render(self):
self.check_html(
self.widget,
"color",
"",
html="<input type='color' name='color'>",
)
| ColorInputTest |
python | kamyu104__LeetCode-Solutions | Python/shifting-letters.py | {
"start": 29,
"end": 454
} | class ____(object):
def shiftingLetters(self, S, shifts):
"""
:type S: str
:type shifts: List[int]
:rtype: str
"""
result = []
times = sum(shifts) % 26
for i, c in enumerate(S):
index = ord(c) - ord('a')
result.append(chr(ord('a') + (index+times) % 26))
times = (times-shifts[i]) % 26
return "".join(result)
| Solution |
python | joke2k__faker | faker/providers/address/sl_SI/__init__.py | {
"start": 45,
"end": 41728
} | class ____(AddressProvider):
city_formats = ("{{city_name}}",)
street_name_formats = ("{{street_name}}",)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("###", "##", "#", "#a", "#b", "#c")
postcode_formats = ("####",)
cities = (
"Ajdovščina",
"Bled",
"Bovec",
"Brežice",
"Celje",
"Cerknica",
"Črnomelj",
"Domžale",
"Dravograd",
"Gornja Radgona",
"Gornji Grad",
"Grosuplje",
"Hrastnik",
"Idrija",
"Ilirska Bistrica",
"Izola",
"Jesenice",
"Kamnik",
"Kobarid",
"Kočevje",
"Koper",
"Kostanjevica na Krki",
"Kranj",
"Krško",
"Laško",
"Lenart v Slovenskih goricah",
"Lendava",
"Litija",
"Ljubljana",
"Ljutomer",
"Logatec",
"Maribor",
"Medvode",
"Mengeš",
"Metlika",
"Mežica",
"Murska Sobota",
"Nova Gorica",
"Novo mesto",
"Ormož",
"Piran",
"Postojna",
"Prevalje",
"Ptuj",
"Radeče",
"Radovljica",
"Ravne na Koroškem",
"Ribnica",
"Rogaška Slatina",
"Ruše",
"Sevnica",
"Sežana",
"Slovenj Gradec",
"Slovenska Bistrica",
"Slovenske Konjice",
"Šempeter pri Gorici",
"Šentjur",
"Škofja Loka",
"Šoštanj",
"Tolmin",
"Trbovlje",
"Trebnje",
"Tržič",
"Turnišče",
"Velenje",
"Vipava",
"Vipavski Križ",
"Višnja Gora",
"Vrhnika",
"Zagorje ob Savi",
"Žalec",
"Železniki",
"Žiri",
)
streets = (
"Abramova ulica",
"Adamičeva ulica",
"Adamič-Lundrovo nabrežje",
"Ajdovščina",
"Aleševa ulica",
"Alešovčeva ulica",
"Aljaževa ulica",
"Ambrožev trg",
"Ameriška ulica",
"Andrićeva ulica",
"Anžurjeva ulica",
"Apihova ulica",
"Argentinska ulica",
"Arharjeva cesta",
"Arkova ulica",
"Artačeva ulica",
"Aškerčeva cesta",
"Avčinova ulica",
"Avsečeva ulica",
"Avstrijska ulica",
"Avšičeva cesta",
"Ažmanova ulica",
"Babičeva ulica",
"Badjurova ulica",
"Balinarska pot",
"Baragova ulica",
"Barjanska cesta",
"Bavdkova ulica",
"Baznikova ulica",
"Bazoviška ulica",
"Beethovnova ulica",
"Belačeva ulica",
"Beljaška ulica",
"Berčičeva ulica",
"Berčonova pot",
"Berdajsova ulica",
"Bernekerjeva ulica",
"Bernikova ulica",
"Betettova cesta",
"Bezenškova ulica",
"Bežigrad",
"Bičevje",
"Bilečanska ulica",
"Bitenčeva ulica",
"Bizjakova ulica",
"Bizjanova ulica",
"Bizovški štradon",
"Blasnikova ulica",
"Blasov breg",
"Bleiweisova cesta",
"Bobenčkova ulica",
"Bobrova ulica",
"Bognarjeva pot",
"Bohinjčeva ulica",
"Bohoričeva ulica",
"Boletova ulica",
"Bolgarska ulica",
"Borovniška ulica",
"Borštnikov trg",
"Borutova ulica",
"Božičeva ulica",
"Brankova ulica",
"Bratinova ulica",
"Bratislavska cesta",
"Bratov Jakopičev ulica",
"Bratov Kunovarjev ulica",
"Bravničarjeva ulica",
"Brdnikova ulica",
"Breg",
"Bregarjeva ulica",
"Breznikova ulica",
"Brglezov štradon",
"Brilejeva ulica",
"Brodarjev trg",
"Brodska cesta",
"Burnikova ulica",
"Cankarjev vrh",
"Cankarjevo nabrežje",
"Carja Dušana ulica",
"Celarčeva ulica",
"Celjska ulica",
"Celovška cesta",
"Cerkniška ulica",
"Cerutova ulica",
"Cesta Andreja Bitenca",
"Cesta Ceneta Štuparja",
"Cesta Dolomitskega odreda",
"Cesta II. grupe odredov",
"Cesta Ljubljanske brigade",
"Cesta na Bellevue",
"Cesta na Bokalce",
"Cesta na Brinovec",
"Cesta na Brod",
"Cesta na Ježah",
"Cesta na Kope",
"Cesta na Laze",
"Cesta na Loko",
"Cesta na Mesarico",
"Cesta na Ozare",
"Cesta na Poljane",
"Cesta na Prevoje",
"Cesta na Urh",
"Cesta na Vrhovce",
"Cesta slov. kmečkih uporov",
"Cesta Urške Zatlerjeve",
"Cesta v Dvor",
"Cesta v Gameljne",
"Cesta v Hrastje",
"Cesta v hrib",
"Cesta v Kleče",
"Cesta v Kostanj",
"Cesta v Legarico",
"Cesta v Mestni log",
"Cesta v Pečale",
"Cesta v Prod",
"Cesta v Rožno dolino",
"Cesta v Šmartno",
"Cesta v Zeleni log",
"Cesta v Zgornji log",
"Cesta vstaje",
"Cesta 24. junija",
"Cesta 25 talcev",
"Cesta 27. aprila",
"Chengdujska cesta",
"Chopinov prehod",
"Cigaletova ulica",
"Cilenškova ulica",
"Cimermanova ulica",
"Cimpermanova ulica",
"Cizejeva ulica",
"Clevelandska ulica",
"Colnarjeva ulica",
"Cvetlična pot",
"Čampova ulica",
"Čanžekova ulica",
"Čargova ulica",
"Čebelarska ulica",
"Čehova ulica",
"Čepelnikova ulica",
"Čepovanska ulica",
"Čerinova ulica",
"Černigojeva ulica",
"Černivčeva ulica",
"Červanova ulica",
"Čevljarska ulica",
"Čižmanova ulica",
"Čopova ulica",
"Črna pot",
"Črnuška cesta",
"Črtomirova ulica",
"Čučkova ulica",
"Dajnkova ulica",
"Dalmatinova ulica",
"Danile Kumarjeve ulica",
"Dečkova ulica",
"Dečmanova ulica",
"Delakova ulica",
"Demšarjeva cesta",
"Derčeva ulica",
"Dergančeva ulica",
"Dermotova ulica",
"Detelova ulica",
"Devinska ulica",
"Devova ulica",
"Divjakova ulica",
"Do proge",
"Dobrajčeva ulica",
"Dobrdobska ulica",
"Dolenjska cesta",
"Dolgi breg",
"Dolgi most",
"Dolharjeva ulica",
"Dolinarjeva ulica",
"Dolinškova ulica",
"Dolničarjeva ulica",
"Dolomitska ulica",
"Drabosnjakova ulica",
"Draga",
"Draveljska ulica",
"Dražgoška ulica",
"Drenikov vrh",
"Drenikova ulica",
"Dunajska cesta",
"Dvojna ulica",
"Dvorakova ulica",
"Dvorni trg",
"Eipprova ulica",
"Ellerjeva ulica",
"Emonska cesta",
"Erbežnikova ulica",
"Erjavčeva cesta",
"Fabianijeva ulica",
"Fani Grumove ulica",
"Ferberjeva ulica",
"Filipičeva ulica",
"Flajšmanova ulica",
"Flandrova ulica",
"Forsterjeva ulica",
"Franketova ulica",
"Frankopanska ulica",
"Frenkova pot",
"Friškovec",
"Funtkova ulica",
"Fužinska cesta",
"Gabrov trg",
"Gača",
"Galičeva ulica",
"Galjevica",
"Gallusovo nabrežje",
"Gasilska cesta",
"Gasparijeva ulica",
"Gašperšičeva ulica",
"Gerbičeva ulica",
"Gestrinova ulica",
"Glavarjeva ulica",
"Gledališka stolba",
"Glinška ulica",
"Glinškova ploščad",
"Glonarjeva ulica",
"Gmajnice",
"Gobarska pot",
"Godeževa ulica",
"Gola Loka",
"Golarjeva ulica",
"Goljarjeva pot",
"Golouhova ulica",
"Goriška ulica",
"Gorjančeva ulica",
"Gorjupova ulica",
"Gornji Rudnik I",
"Gornji Rudnik II",
"Gornji Rudnik III",
"Gornji trg",
"Goropečnikova ulica",
"Gortanova ulica",
"Gospodinjska ulica",
"Gosposka ulica",
"Gosposvetska cesta",
"Govekarjeva ulica",
"Gozdna pot",
"Grablovičeva ulica",
"Gradišče",
"Gradnikova ulica",
"Grafenauerjeva ulica",
"Grajski drevored",
"Grajzerjeva ulica",
"Gramozna pot",
"Grassellijeva ulica",
"Gregorčičeva ulica",
"Gregorinova ulica",
"Grintovška ulica",
"Grobeljca",
"Grobeljska pot",
"Groharjeva cesta",
"Groznikova ulica",
"Grška ulica",
"Grško",
"Gruberjevo nabrežje",
"Grudnovo nabrežje",
"Gubčeva ulica",
"Gunceljska cesta",
"Gustinčarjeva ulica",
"Gustinčičeva ulica",
"Hacetova ulica",
"Hafnerjeva ulica",
"Hajdrihova ulica",
"Hauptmanca",
"Hladilniška pot",
"Hladnikova cesta",
"Hlebčeva ulica",
"Hotimirova ulica",
"Hradeckega cesta",
"Hranilniška ulica",
"Hribarjevo nabrežje",
"Hribernikova ulica",
"Hribovska pot",
"Hrvaška ulica",
"Hrvatski trg",
"Hubadova ulica",
"Hudourniška pot",
"Idrijska ulica",
"Igriška ulica",
"Ilešičeva ulica",
"Ilovški štradon",
"Industrijska cesta",
"Ingličeva ulica",
"Italijanska ulica",
"Izletniška ulica",
"Ižanska cesta",
"Jakčeva ulica",
"Jakhljeva ulica",
"Jakopičev drevored",
"Jakopičevo sprehajališče",
"Jakšičeva ulica",
"Jalnova ulica",
"Jamova cesta",
"Janežičeva cesta",
"Janova ulica",
"Janševa ulica",
"Jarčeva ulica",
"Jarnikova ulica",
"Jarše",
"Jarška cesta",
"Javorškova ulica",
"Jazbečeva pot",
"Jelinčičeva ulica",
"Jenkova ulica",
"Jensenova ulica",
"Jerajeva ulica",
"Jeranova ulica",
"Jesenkova ulica",
"Jesihov štradon",
"Jezerska ulica",
"Ježa",
"Ježica",
"Joškov štradon",
"Jurčičev trg",
"Jurčkova cesta",
"Juričeva ulica",
"Juvanova ulica",
"K reaktorju",
"Kadilnikova ulica",
"Kajuhova ulica",
"Kalingerjeva ulica",
"Kalinova ulica",
"Kaminova ulica",
"Kamniška ulica",
"Kamnogoriška cesta",
"Kančeva ulica",
"Kanonijeva cesta",
"Kantetova ulica",
"Kapusova ulica",
"Kardeljeva ploščad",
"Karingerjeva ulica",
"Karunova ulica",
"Kastelčeva ulica",
"Kašeljska cesta",
"Kavadarska cesta",
"Kavčičeva ulica",
"Kavškova ulica",
"Kekčeva ulica",
"Kermaunerjeva ulica",
"Kernova cesta",
"Kerševanova ulica",
"Keržičeva ulica",
"Kettejeva ulica",
"Kladezna ulica",
"Klančarjeva ulica",
"Kleče",
"Klemenova ulica",
"Kleparska steza",
"Ključavničarska ulica",
"Klunova ulica",
"Kmečka pot",
"Knafljev prehod",
"Knezov štradon",
"Knezova ulica",
"Knobleharjeva ulica",
"Koblarjeva ulica",
"Kocbekova ulica",
"Kocenova ulica",
"Kocjanova ulica",
"Kočenska ulica",
"Kodrova ulica",
"Kogojeva ulica",
"Kogovškova ulica",
"Kokaljeva ulica",
"Kolarjeva ulica",
"Kolesarska pot",
"Koleševa ulica",
"Kolinska ulica",
"Kolmanova ulica",
"Kolodvorska ulica",
"Komanova ulica",
"Komenskega ulica",
"Kongresni trg",
"Kopališka ulica",
"Kopitarjeva ulica",
"Kopna pot",
"Koprska ulica",
"Koreninova ulica",
"Koroška ulica",
"Korotanska ulica",
"Kosančeva ulica",
"Koseskega ulica",
"Koseška cesta",
"Kosmačeva ulica",
"Kosova ulica",
"Kosovelova ulica",
"Koširjeva ulica",
"Kotnikova ulica",
"Kovačeva ulica",
"Kovaška ulica",
"Kovinarska ulica",
"Kozakova ulica",
"Kozinova ulica",
"Kozlarjeva pot",
"Koželjeva ulica",
"Krakovski nasip",
"Kraljeva ulica",
"Kranerjeva ulica",
"Kraška ulica",
"Kratka pot",
"Kratka steza",
"Kregarjeva ulica",
"Kreljeva ulica",
"Kremžarjeva ulica",
"Krimska ulica",
"Krištofova ulica",
"Kriva pot",
"Krivec",
"Križevniška soteska",
"Križna ulica",
"Krmčeva ulica",
"Krmeljeva ulica",
"Kropova ulica",
"Krošljeva ulica",
"Krovska ulica",
"Krožna pot",
"Kržičeva ulica",
"Kudrova ulica",
"Kuhljeva cesta",
"Kumerdejeva ulica",
"Kumerjeve ulica",
"Kumrovška ulica",
"Kurilniška ulica",
"Kurirska ulica",
"Kusoldova ulica",
"Kuštrinova ulica",
"Kuzeletova ulica",
"Kuzmičeva ulica",
"Lahova pot",
"Lajovčeva ulica",
"Laknerjeva ulica",
"Lakotence",
"Lampetova ulica",
"Lamutova ulica",
"Langusova ulica",
"Latinski trg",
"Lavrinova ulica",
"Layerjeva ulica",
"Lazarjeva ulica",
"Legatova ulica",
"Lemeževa ulica",
"Lepi pot",
"Lepodvorska ulica",
"Leskovičeva ulica",
"Letališka cesta",
"Levarjeva ulica",
"Levičnikova ulica",
"Levstikov trg",
"Levstikova ulica",
"Linhartov podhod",
"Linhartova cesta",
"Lipahova ulica",
"Litijska cesta",
"Litostrojska cesta",
"Livada",
"Livarska ulica",
"Ločnikarjeva ulica",
"Lončarska steza",
"Lorenzova cesta",
"Lovrenčičeva ulica",
"Lovska ulica",
"Lovšetova ulica",
"Lubejeva ulica",
"Luize Pesjakove ulica",
"Lunačkova ulica",
"Mačja steza",
"Mačkov kot",
"Mačkova ulica",
"Madžarska ulica",
"Magistrova ulica",
"Maistrova ulica",
"Majaronova ulica",
"Majde Vrhovnikove ulica",
"Majorja Lavriča ulica",
"Makucova ulica",
"Mala ulica",
"Mala vas",
"Malejeva ulica",
"Malenškova ulica",
"Malgajeva ulica",
"Mali štradon",
"Mali trg",
"Malnarjeva ulica",
"Marčenkova ulica",
"Marentičeva ulica",
"Mareška pot",
"Marice Kovačeve ulica",
"Marincljeva ulica",
"Marinovševa cesta",
"Maroltova ulica",
"Martina Krpana ulica",
"Martinčeva ulica",
"Martinova ulica",
"Marušičeva ulica",
"Masarykova cesta",
"Matjanova pot",
"Matjaževa ulica",
"Maurerjeva ulica",
"Mazovčeva pot",
"Med hmeljniki",
"Medarska ulica",
"Medenska cesta",
"Medveščkova ulica",
"Mekinčeva ulica",
"Melikova ulica",
"Mencingerjeva ulica",
"Merčnikova ulica",
"Merosodna ulica",
"Mesesnelova ulica",
"Mestni trg",
"Meškova ulica",
"Metelkova ulica",
"Miheličeva cesta",
"Mihov štradon",
"Miklavčeva ulica",
"Miklošičeva cesta",
"Mikuževa ulica",
"Milčetova pot",
"Mire Lenardičeve ulica",
"Mirje",
"Mirna pot",
"Mislejeva ulica",
"Mizarska pot",
"Mladinska ulica",
"Mlake",
"Mlinska pot",
"Močnikova ulica",
"Mokrška ulica",
"Molekova ulica",
"Moškričeva ulica",
"Mrharjeva ulica",
"Mrzelova ulica",
"Murkova ulica",
"Murnikova ulica",
"Murnova ulica",
"Muzejska ulica",
"Na cvetači",
"Na delih",
"Na dolih",
"Na gaju",
"Na gmajni",
"Na Herši",
"Na jami",
"Na klančku",
"Na Korošci",
"Na Palcah",
"Na požaru",
"Na produ",
"Na Rojah",
"Na Stolbi",
"Na Straški vrh",
"Na Trati",
"Na Žalah",
"Nade Ovčakove ulica",
"Nadgoriška cesta",
"Nahlikova ulica",
"Nahtigalova ulica",
"Nanoška ulica",
"Nazorjeva ulica",
"Nebotičnikov prehod",
"Nedohova ulica",
"Njegoševa cesta",
"Nova ulica",
"Novakova pot",
"Novakova ulica",
"Novi trg",
"Novinarska ulica",
"Novo naselje",
"Novo Polje, cesta I",
"Novo Polje, cesta III",
"Novo Polje, cesta IV",
"Novo Polje, cesta V",
"Novo Polje, cesta VI",
"Novo Polje, cesta VII",
"Novo Polje, cesta X",
"Novo Polje, cesta XI",
"Novo Polje, cesta XII",
"Novo Polje, cesta XIV",
"Novo Polje, cesta XIX",
"Novo Polje, cesta XVI",
"Novo Polje, cesta XVII",
"Novo Polje, cesta XXI",
"Novo Polje, cesta XXIII",
"Novosadska ulica",
"Ob daljnovodu",
"Ob dolenjski železnici",
"Ob Farjevcu",
"Ob Ljubljanici",
"Ob Mejašu",
"Ob potoku",
"Ob pristanu",
"Ob Savi",
"Ob studencu",
"Ob zdravstvenem domu",
"Ob zeleni jami",
"Ob zelenici",
"Ob žici",
"Obirska ulica",
"Obrežna steza",
"Obrije",
"Ocvirkova ulica",
"Ogrinčeva ulica",
"Okiškega ulica",
"Omahnova ulica",
"Omejčeva ulica",
"Omersova ulica",
"Oražnova ulica",
"Orlova ulica",
"Osenjakova ulica",
"Osojna pot",
"Osojna steza",
"Osterčeva ulica",
"Ovčakova ulica",
"Pahorjeva ulica",
"Palmejeva ulica",
"Papirniška pot",
"Park Ajdovščina",
"Park Arturo Toscanini",
"Parmova ulica",
"Parmska cesta",
"Partizanska ulica",
"Pavlovčeva ulica",
"Pavšičeva ulica",
"Pečarjeva ulica",
"Pečnik",
"Pečnikova ulica",
"Pegamova ulica",
"Perčeva ulica",
"Periška cesta",
"Perkova ulica",
"Peršinova cesta",
"Pesarska cesta",
"Pestotnikova ulica",
"Peščena pot",
"Petkova ulica",
"Petkovškovo nabrežje",
"Petrčeva ulica",
"Pilonova ulica",
"Pionirska pot",
"Pipanova pot",
"Pirnatova ulica",
"Planinska cesta",
"Planinškova ulica",
"Plečnikov podhod",
"Plemljeva ulica",
"Plešičeva ulica",
"Pleteršnikova ulica",
"Pločanska ulica",
"Pod akacijami",
"Pod bregom",
"Pod bresti",
"Pod bukvami",
"Pod Debnim vrhom",
"Pod gabri",
"Pod gozdom",
"Pod hrasti",
"Pod hribom",
"Pod hruško",
"Pod jelšami",
"Pod jezom",
"Pod ježami",
"Pod Kamno gorico",
"Pod klancem",
"Pod lipami",
"Pod topoli",
"Pod Trančo",
"Pod turnom",
"Pod vrbami",
"Podgornikova ulica",
"Podgorska cesta",
"Podgrajska cesta",
"Podjunska ulica",
"Podlimbarskega ulica",
"Podmilščakova ulica",
"Podrožniška pot",
"Podsmreška cesta",
"Podutiška cesta",
"Pogačarjev trg",
"Pohlinova ulica",
"Poklukarjeva ulica",
"Polakova ulica",
"Polanškova ulica",
"Poljanska cesta",
"Polje",
"Polje, cesta I",
"Polje, cesta II",
"Polje, cesta III",
"Polje, cesta VI",
"Polje, cesta VIII",
"Polje, cesta X",
"Polje, cesta XIV",
"Polje, cesta XL",
"Polje, cesta XLII",
"Polje, cesta XLVI",
"Polje, cesta XVI",
"Polje, cesta XVIII",
"Polje, cesta XXII",
"Polje, cesta XXIV",
"Polje, cesta XXVI",
"Polje, cesta XXX",
"Polje, cesta XXXII",
"Polje, cesta XXXIV",
"Polje, cesta XXXVIII",
"Poljedelska ulica",
"Poljska pot",
"Porentova ulica",
"Posavskega ulica",
"Postojnska ulica",
"Pot do šole",
"Pot Draga Jakopiča",
"Pot heroja Trtnika",
"Pot k igrišču",
"Pot k ribniku",
"Pot k Savi",
"Pot k sejmišču",
"Pot k studencu",
"Pot na Breje",
"Pot na Drenikov vrh",
"Pot na Golovec",
"Pot na goro",
"Pot na Gradišče",
"Pot na Grič",
"Pot na Labar",
"Pot na mah",
"Pot na most",
"Pot na Orle",
"Pot na Visoko",
"Pot na Zduše",
"Pot Rdečega križa",
"Pot v boršt",
"Pot v Čeželj",
"Pot v dolino",
"Pot v Goričico",
"Pot v hribec",
"Pot v mejah",
"Pot v Mlake",
"Pot v Podgorje",
"Pot v Zeleni gaj",
"Pot za Brdom",
"Pot za razori",
"Potokarjeva ulica",
"Potrčeva ulica",
"Povšetova ulica",
"Prašnikarjeva ulica",
"Praznikova ulica",
"Pražakova ulica",
"Pred Savljami",
"Predjamska cesta",
"Predor pod Gradom",
"Preglov trg",
"Prekmurska ulica",
"Prelčeva ulica",
"Preloge",
"Premrlova ulica",
"Preradovićeva ulica",
"Preserska ulica",
"Prešernov trg",
"Prešernova cesta",
"Pretnarjeva ulica",
"Pri borštu",
"Pri brvi",
"Pri malem kamnu",
"Pri mostiščarjih",
"Pribinova ulica",
"Prijateljeva ulica",
"Primorska ulica",
"Prinčičeva ulica",
"Prisojna ulica",
"Prištinska ulica",
"Privoz",
"Proletarska cesta",
"Prule",
"Prušnikova ulica",
"Prvomajska ulica",
"Pšatnik",
"Pšatska pot",
"Ptujska ulica",
"Pučnikova ulica",
"Puharjeva ulica",
"Puhova ulica",
"Puhtejeva ulica",
"Puterlejeva ulica",
"Putrihova ulica",
"Raičeva ulica",
"Rakovniška ulica",
"Rakuševa ulica",
"Ramovševa ulica",
"Ravbarjeva ulica",
"Ravna pot",
"Ravnikova ulica",
"Razgledna steza",
"Reber",
"Reboljeva ulica",
"Rečna ulica",
"Regentova cesta",
"Resljeva cesta",
"Reška ulica",
"Ribičičeva ulica",
"Ribji trg",
"Ribniška ulica",
"Rimska cesta",
"Rjava cesta",
"Robbova ulica",
"Robičeva ulica",
"Rodičeva ulica",
"Rojčeva ulica",
"Romavhova ulica",
"Rosna pot",
"Rotarjeva ulica",
"Rovšnikova ulica",
"Rozmanova ulica",
"Rožanska ulica",
"Rožičeva ulica",
"Rožna dolina, cesta I",
"Rožna dolina, cesta III",
"Rožna dolina, cesta IV",
"Rožna dolina, cesta V",
"Rožna dolina, cesta VI",
"Rožna dolina, cesta VIII",
"Rožna dolina, cesta X",
"Rožna dolina, cesta XII",
"Rožna dolina, cesta XIII",
"Rožna dolina, cesta XV",
"Rožna dolina, cesta XVII",
"Rožna ulica",
"Rudnik I",
"Rudnik II",
"Rudnik III",
"Runkova ulica",
"Ruska ulica",
"Rutarjeva ulica",
"Sadinja vas",
"Sajovčeva ulica",
"Samova ulica",
"Saškova ulica",
"Sattnerjeva ulica",
"Savinova ulica",
"Savinškova ulica",
"Savlje",
"Savska cesta",
"Sedejeva ulica",
"Selanov trg",
"Selanova ulica",
"Setnikarjeva ulica",
"Seunigova ulica",
"Simončičeva ulica",
"Siva pot",
"Skapinova ulica",
"Sketova ulica",
"Skopčeva ulica",
"Skrbinškova ulica",
"Slape",
"Slapnikova ulica",
"Slavčja ulica",
"Slomškova ulica",
"Slovenčeva ulica",
"Slovenska cesta",
"Smoletova ulica",
"Smrekarjeva ulica",
"Smrtnikova ulica",
"Snebersko nabrežje",
"Snežniška ulica",
"Snojeva ulica",
"Sojerjeva ulica",
"Sončna pot",
"Sostrska cesta",
"Soška ulica",
"Soteška pot",
"Soussenska ulica",
"Sovretova ulica",
"Spodnji Rudnik I",
"Spodnji Rudnik II",
"Spodnji Rudnik III",
"Spodnji Rudnik V",
"Spomeniška pot",
"Srebrničeva ulica",
"Srednja pot",
"Stadionska ulica",
"Staničeva ulica",
"Stara Ježica",
"Stara slovenska ulica",
"Stare Črnuče",
"Stari trg",
"Stegne",
"Steletova ulica",
"Sternadova ulica",
"Stiška ulica",
"Stolpniška ulica",
"Stoženska ulica",
"Stožice",
"Stražarjeva ulica",
"Streliška ulica",
"Stritarjeva ulica",
"Strmeckijeva ulica",
"Strmi pot",
"Strniševa cesta",
"Strossmayerjeva ulica",
"Strugarska ulica",
"Strupijevo nabrežje",
"Suhadolčanova ulica",
"Sulčja ulica",
"Svetčeva ulica",
"Šarhova ulica",
"Šentjakob",
"Šentviška ulica",
"Šerkova ulica",
"Šestova ulica",
"Šibeniška ulica",
"Šinkov štradon",
"Šišenska cesta",
"Šivičeva ulica",
"Škerljeva ulica",
"Škofova ulica",
"Škrabčeva ulica",
"Šlandrova ulica",
"Šlosarjeva ulica",
"Šmarna gora",
"Šmartinska cesta",
"Šmartno",
"Španova pot",
"Španska ulica",
"Štajerska cesta",
"Štebijeva cesta",
"Štefančeva ulica",
"Štembalova ulica",
"Štepanjska cesta",
"Štepanjsko nabrežje",
"Štirnova ulica",
"Štradon čez Prošco",
"Štrekljeva ulica",
"Študentovska ulica",
"Štukljeva cesta",
"Štula",
"Šturmova ulica",
"Šubičeva ulica",
"Šumarjeva ulica",
"Švabićeva ulica",
"Švarova ulica",
"Švegljeva cesta",
"Tabor",
"Tacenska cesta",
"Tavčarjeva ulica",
"Tbilisijska ulica",
"Tesarska ulica",
"Teslova ulica",
"Tesna ulica",
"Tesovnikova ulica",
"Tiha ulica",
"Tiranova ulica",
"Tischlerjeva ulica",
"Tivolska cesta",
"Tkalska ulica",
"Tobačna ulica",
"Tolminska ulica",
"Tomačevo",
"Tomačevska cesta",
"Tomažičeva ulica",
"Tometova ulica",
"Tominškova ulica",
"Tomišeljska ulica",
"Toplarniška ulica",
"Topniška ulica",
"Torkarjeva ulica",
"Tratnikova ulica",
"Travniška ulica",
"Trbeže",
"Trdinova ulica",
"Trebušakova ulica",
"Trg francoske revolucije",
"Trg mladih",
"Trg mladinskih delov. brigad",
"Trg narodnih herojev",
"Trg prekomorskih brigad",
"Trg republike",
"Trg 9. maja",
"Trinkova ulica",
"Trnovčeva ulica",
"Trnovska ulica",
"Trpinčeva ulica",
"Trstenjakova ulica",
"Trtnikova ulica",
"Tržaška cesta",
"Tržna ulica",
"Tugomerjeva ulica",
"Turnerjeva ulica",
"Turnsko nabrežje",
"Udvančeva ulica",
"Ulica aktivistov",
"Ulica Alme Sodnik",
"Ulica Andreja Kumarja",
"Ulica Angelce Ocepkove",
"Ulica Angele Ljubičeve",
"Ulica borca Petra",
"Ulica borcev za severno mejo",
"Ulica bratov Bezlajev",
"Ulica bratov Blanč",
"Ulica bratov Jančar",
"Ulica bratov Komel",
"Ulica bratov Kraljič",
"Ulica bratov Martinec",
"Ulica bratov Novak",
"Ulica bratov Rozmanov",
"Ulica bratov Škofov",
"Ulica bratov Učakar",
"Ulica bratov Židan",
"Ulica Dušana Kraigherja",
"Ulica Ernesta Kramerja",
"Ulica Franca Nebca",
"Ulica Francke Jerasove",
"Ulica Franja Novaka",
"Ulica gledališča BTC",
"Ulica Goce Delčeva",
"Ulica Gubčeve brigade",
"Ulica Hermana Potočnika",
"Ulica Ivana Roba",
"Ulica Ivanke Kožuh",
"Ulica Ivice Pirjevčeve",
"Ulica Janeza Pavla II.",
"Ulica Janeza Rožiča",
"Ulica Jožeta Jame",
"Ulica Jožeta Japlja",
"Ulica Jožeta Mirtiča",
"Ulica Konrada Babnika",
"Ulica Koroškega bataljona",
"Ulica Lizike Jančarjeve",
"Ulica Lojzeta Spacala",
"Ulica Lovre Klemenčiča",
"Ulica Malči Beličeve",
"Ulica Marije Drakslerjeve",
"Ulica Marije Hvaličeve",
"Ulica Marje Boršnikove",
"Ulica Marka Šlajmerja",
"Ulica Milana Majcna",
"Ulica Milke Kerinove",
"Ulica Minke Bobnar",
"Ulica Mirka Jurce",
"Ulica Mirka Tomšiča",
"Ulica Miroslava Turka",
"Ulica Molniške čete",
"Ulica na Grad",
"Ulica Nade Čamernikove",
"Ulica Olge Mohorjeve",
"Ulica padlih borcev",
"Ulica Pariške komune",
"Ulica Pohorskega bataljona",
"Ulica Polonce Čude",
"Ulica prvoborcev",
"Ulica Rezke Dragarjeve",
"Ulica Rezke Klopčič",
"Ulica Rudolfa Janežiča",
"Ulica Staneta Severja",
"Ulica Štefke Zbašnikove",
"Ulica talcev",
"Ulica Tončke Čečeve",
"Ulica v Kokovšek",
"Ulica Vide Pregarčeve",
"Ulica Vladimirja Trampuža",
"Ulica Zore Ragancinove",
"Ulica Žanke Erjavec",
"Ulica 15. aprila",
"Ulica 15. maja",
"Ulica 24. avgusta",
"Ulica 4. julija",
"Ulica 7. septembra",
"Ulica 9. junija",
"Uršičev štradon",
"Usnjarska ulica",
"V Češnjico",
"V dolini",
"V Karlovce",
"V Karlovce",
"V Kladeh",
"V Murglah",
"V Sige",
"V Varde",
"V Zalar",
"Vagajeva ulica",
"Valjavčeva ulica",
"Valvasorjeva ulica",
"Vandotova ulica",
"Vaška pot",
"Večna pot",
"Vegova ulica",
"Velebitska ulica",
"Veliki štradon",
"Velikovška ulica",
"Velnarjeva ulica",
"Verovškova ulica",
"Veršičeva ulica",
"Veselova ulica",
"Videmska ulica",
"Vidergarjeva ulica",
"Vidičeva ulica",
"Vidovdanska cesta",
"Vilharjev podhod",
"Vilharjeva cesta",
"Vinterca",
"Vipavska ulica",
"Vipotnikova ulica",
"Viška cesta",
"Vižmarska pot",
"Vodmatska ulica",
"Vodmatski trg",
"Vodna steza",
"Vodnikova cesta",
"Vodnikovo naselje",
"Vodovodna cesta",
"Vogelna ulica",
"Vojkova cesta",
"Volaričeva ulica",
"Vošnjakova ulica",
"Vozna pot na Grad",
"Vožarski pot",
"Vrazov trg",
"Vrbovec",
"Vrbska ulica",
"Vregova ulica",
"Vrhovci, cesta I",
"Vrhovci, cesta II",
"Vrhovci, cesta III",
"Vrhovci, cesta IX",
"Vrhovci, cesta V",
"Vrhovci, cesta VI",
"Vrhovci, cesta X",
"Vrhovci, cesta XI",
"Vrhovci, cesta XII",
"Vrhovci, cesta XIV",
"Vrhovci, cesta XIX",
"Vrhovci, cesta XV",
"Vrhovci, cesta XVII",
"Vrhovci, cesta XVIII",
"Vrhovci, cesta XX",
"Vrhovci, cesta XXII",
"Vrhovci, cesta XXVI",
"Vrhovci, cesta XXVIII",
"Vrhovci, cesta XXXII",
"Vrhovčeva ulica",
"Vrhovnikova ulica",
"Vrtača",
"Vrtna ulica",
"Vrtnarska cesta",
"Vulčeva ulica",
"Vzajemna ulica",
"Windischerjeva ulica",
"Wolfova ulica",
"Za Garažami",
"Za gasilskim domom",
"Za Gradom",
"Za krajem",
"Za opekarno",
"Za partizanskim domom",
"Za progo",
"Za vasjo",
"Zadnikarjeva ulica",
"Zadobrovška cesta",
"Zadružna ulica",
"Zajčeva pot",
"Zajčevi dvori",
"Zakotnikova ulica",
"Zalaznikova ulica",
"Zaletelova ulica",
"Zaloška cesta",
"Zarnikova ulica",
"Zasavska cesta",
"Zatišje",
"Zavetiška ulica",
"Završje",
"Zbašnikova ulica",
"Zdešarjeva cesta",
"Zelena pot",
"Zelenova ulica",
"Zeljarska ulica",
"Zevnikova ulica",
"Zidarjev štradon",
"Ziherlova ulica",
"Zlatek",
"Znamenjska ulica",
"Zofke Kvedrove ulica",
"Zoisova cesta",
"Zupanova ulica",
"Zvezda",
"Zvezdarska ulica",
"Zvezna ulica",
"Žabarjeva ulica",
"Žabjak",
"Žalska ulica",
"Žaucerjeva ulica",
"Žeje",
"Železna cesta",
"Železnikarjeva ulica",
"Žerjalova ulica",
"Židankova ulica",
"Židovska steza",
"Židovska ulica",
"Živaličeva ulica",
"Živinozdravska ulica",
"Žolgerjeva ulica",
)
states = (
"Pomurksa",
"Podravska",
"Koroška",
"Savinjska",
"Zasavska",
"Spodnjeposavska",
"Jugovzhodna Slovenija",
"Osrednjeslovenska",
"Gorenjska",
"Notranjsko - kraška",
"Goriška",
"Obalno - kraška",
)
countries = (
"Afganistan",
"Islamska republika Afganistan",
"Albanija",
"Alžirija",
"Ljudska demokratična republika Alžirija",
"Andora",
"Angola",
"Republika Angola",
"Antigva in Barbuda",
"Argentina",
"Armenija",
"Republika Armenija",
"Avstralija",
"Avstrija",
"Azerbajdžan",
"Azerbajdžanska republika",
"Bahami",
"Zveza Bahami",
"Država Bahrajn",
"Bangladeš",
"Ljudska republika Bangladeš",
"Belgija",
"Kraljevina Belgija",
"Belize",
"Belorusija",
"Benin",
"Republika Benin",
"Bocvana",
"Republika Bocvana",
"Republika Bolgarija",
"Bolivija",
"Republika Bolivija",
"Brazilija",
"Federativna republika Brazilija",
"Brunej",
"Burkina Faso",
"Burundi",
"Republika Burundi",
"Butan",
"Ciper",
"Republika Ciper",
"Čad",
"Republika Čad",
"Češka",
"Čile",
"Republika Čile",
"Črna gora",
"Republika Črna gora",
"Kraljevina Danska",
"Dominika",
"Zveza Dominika",
"Džibuti",
"Republika Džibuti",
"Egipt",
"Arabska republika Egipt",
"Republika Ekvador",
"Ekvatorialna Gvineja",
"Eritreja",
"Estonija",
"Republika Estonija",
"Etiopija",
"Fidži",
"Filipini",
"Republika Filipini",
"Finska",
"Republika Finska",
"Francoska republika",
"Gabon",
"Gabonska republika",
"Gambija",
"Gana",
"Republika Gana",
"Grčija",
"Helenska republika",
"Grenada",
"Gvajana",
"Republika Gvajana",
"Gvatemala",
"Republika Gvatemala",
"Republika Gvineja",
"Gvineja Bissau",
"Republika Gvineja Bissau",
"Republika Haiti",
"Honduras",
"Republika Honduras",
"Hrvaška",
"Indija",
"Republika Indija",
"Indonezija",
"Republika Indonezija",
"Republika Irak",
"Iran",
"Islamska republika Iran",
"Irska",
"Republika Islandija",
"Italija",
"Italijanska republika",
"Izrael",
"Jamajka",
"Japonska",
"Jemen",
"Republika Jemen",
"Jordanija",
"Južna Afrika",
"Republika Južna Afrika",
"Južna Koreja",
"Kambodža",
"Kraljevina Kambodža",
"Kamerun",
"Republika Kamerun",
"Katar",
"Država Katar",
"Kazahstan",
"Republika Kazahstan",
"Kenija",
"Kirgizistan",
"Kirgiška republika",
"Kiribati",
"Kitajska",
"Kolumbija",
"Republika Kolumbija",
"Komori",
"Kongo",
"Republika Kongo",
"Demokratična republika Kongo",
"Republika Kostarika",
"Kuba",
"Republika Kuba",
"Kuvajt",
"Laos",
"Laoška ljudska demokratična republika",
"Latvija",
"Lesoto",
"Kraljevina Lesoto",
"Libanon",
"Libanonska republika",
"Republika Liberija",
"Libija",
"Libijska arabska džamahirija",
"Lihtenštajn",
"Kneževina Lihtenštajn",
"Litva",
"Republika Litva",
"Veliko vojvodstvo Luksemburg",
"Madagaskar",
"Republika Madagaskar",
"Republika Madžarska",
"Republika Severna Makedonija",
"Malavi",
"Maldivi",
"Republika Maldivi",
"Malezija",
"Mali",
"Republika Mali",
"Republika Malta",
"Maroko",
"Kraljevina Maroko",
"Marshallovi otoki",
"Mauritius",
"Republika Mauritius",
"Mavretanija",
"Mehika",
"Združene mehiške države",
"Mikronezija",
"Mjanmar",
"Zveza Mjanmar",
"Moldavija",
"Moldavija, Republika",
"Kneževina Monako",
"Mongolija",
"Mozambik",
"Republika Mozambik",
"Republika Namibija",
"Nauru",
"Republika Nauru",
"Nemčija",
"Nepal",
"Kraljevina Nepal",
"Niger",
"Republika Niger",
"Nigerija",
"Nikaragva",
"Republika Nikaragva",
"Nizozemska",
"Norveška",
"Kraljevina Norveška",
"Nova Zelandija",
"Oman",
"Pakistan",
"Islamska republika Pakistan",
"Palau",
"Republika Palau",
"Republika Panama",
"Papua Nova Gvineja",
"Paragvaj",
"Peru",
"Republika Peru",
"Poljska",
"Republika Poljska",
"Portugalska republika",
"Romunija",
"Ruanda",
"Republika Ruanda",
"Ruska federacija",
"Saint Kitts in Nevis",
"Saint Lucia",
"Salomonovi otoki",
"Salvador",
"Republika Salvador",
"San Marino",
"Sao Tome in Principe",
"Demokratična republika Sao Tome in Principe",
"Kraljevina Saudova Arabija",
"Sejšeli",
"Republika Sejšeli",
"Republika Senegal",
"Severna Koreja",
"Severna Makedonija",
"Sierra Leone",
"Republika Sierra Leone",
"Singapur",
"Sirija",
"Sirska arabska republika",
"Slonokoščena obala",
"Slovaška",
"Slovaška republika",
"Slovenija",
"Republika Slovenija",
"Somalska demokratična republika",
"Srbija",
"Republika Srbija",
"Sudan",
"Republika Sudan",
"Surinam",
"Republika Surinam",
"Svazi",
"Španija",
"Kraljevina Španija",
"Šrilanka",
"Švedska",
"Kraljevina Švedska",
"Švica",
"Tadžikistan",
"Republika Tadžikistan",
"Tajska",
"Tajvan",
"Tajvan, Provinca Kitajske",
"Tanzanija",
"Togo",
"Togoška republika",
"Tonga",
"Kraljevina Tonga",
"Republika Trinidad in Tobago",
"Tunizija",
"Republika Tunizija",
"Republika Turčija",
"Turkmenistan",
"Tuvalu",
"Uganda",
"Ukrajina",
"Urugvaj",
"Vzhodna republika Urugvaj",
"Uzbekistan",
"Vanuatu",
"Republika Vanuatu",
"Vatikan",
"Velika Britanija",
"Združeno kraljestvo",
"Venezuela",
"Republika Venezuela",
"Vietnam",
"Vzhodni Timor",
"Demokratična republika Vzhodni Timor",
"Samoa",
"Neodvisna država Zahodna Samoa",
"Zambija",
"Združene države Amerike",
"Združene države",
"Združeni arabski emirati",
"Zelenortski otoki",
)
def city_name(self) -> str:
return self.random_element(self.cities)
def street_name(self) -> str:
return self.random_element(self.streets)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
| Provider |
python | pyodide__pyodide | src/tests/test_pyodide_http.py | {
"start": 12427,
"end": 18901
} | class ____:
"""Test suite for pyxhr synchronous HTTP client."""
def test_xhr_basic_get(self, selenium, xhr_test_server):
"""Test basic GET request with pyxhr."""
request_url = xhr_test_server.url_for("/xhr/get")
result_json = selenium.run(f"""
import json
from pyodide.http import pyxhr
response = pyxhr.get('{request_url}')
result = {{
'status_code': response.status_code,
'data': response.json(),
'ok': response.ok
}}
json.dumps(result)
""")
import json
result = json.loads(result_json)
assert result["status_code"] == 200
assert result["data"]["message"] == "GET success"
assert result["ok"] is True
def test_xhr_post_json(self, selenium, xhr_test_server):
"""Test POST request with JSON data."""
request_url = xhr_test_server.url_for("/xhr/post")
result_json = selenium.run(f"""
import json
from pyodide.http import pyxhr
response = pyxhr.post('{request_url}', json={{"test": "data"}})
result = {{
'status_code': response.status_code,
'data': response.json()
}}
json.dumps(result)
""")
import json
result = json.loads(result_json)
assert result["status_code"] == 200
assert result["data"]["message"] == "POST success"
def test_xhr_custom_headers(self, selenium, xhr_test_server):
"""Test custom headers in xhr request."""
request_url = xhr_test_server.url_for("/xhr/headers")
result = selenium.run(f"""
from pyodide.http import pyxhr
response = pyxhr.get('{request_url}', headers={{"X-Test-Header": "test-value"}})
data = response.json()
data['headers'].get('X-Test-Header', 'not-found')
""")
assert result == "test-value"
def test_xhr_basic_auth(self, selenium, xhr_test_server):
"""Test basic authentication with pyxhr."""
request_url = xhr_test_server.url_for("/xhr/auth")
result_json = selenium.run(f"""
import json
from pyodide.http import pyxhr
response = pyxhr.get('{request_url}', auth=('test', 'pass'))
result = {{
'status_code': response.status_code,
'data': response.json()
}}
json.dumps(result)
""")
import json
result = json.loads(result_json)
assert result["status_code"] == 200
assert result["data"]["authenticated"] is True
def test_xhr_url_params(self, selenium, xhr_test_server):
"""Test URL parameters with pyxhr."""
request_url = xhr_test_server.url_for("/xhr/get")
result = selenium.run(f"""
from pyodide.http import pyxhr
response = pyxhr.get('{request_url}', params={{"key1": "value1", "key2": "value2"}})
# Check that the response URL contains the parameters
'?' in response.url and 'key1=value1' in response.url and 'key2=value2' in response.url
""")
assert result is True
def test_xhr_error_status(self, selenium, xhr_test_server):
"""Test error status handling."""
request_url = xhr_test_server.url_for("/xhr/error")
result_json = selenium.run(f"""
import json
from pyodide.http import pyxhr, HttpStatusError
response = pyxhr.get('{request_url}')
try:
response.raise_for_status()
result = {{"error_raised": False}}
except HttpStatusError as e:
result = {{"error_raised": True, "status": e.status}}
json.dumps(result)
""")
import json
result = json.loads(result_json)
assert result["error_raised"] is True
assert result["status"] == 404
def test_xhr_response_properties(self, selenium, xhr_test_server):
"""Test XHRResponse properties."""
request_url = xhr_test_server.url_for("/xhr/get")
result_json = selenium.run(f"""
import json
from pyodide.http import pyxhr
response = pyxhr.get('{request_url}')
result = {{
'status_code': response.status_code,
'text_type': type(response.text).__name__,
'content_type': type(response.content).__name__,
'headers_type': type(response.headers).__name__,
'ok': response.ok,
'has_url': bool(response.url)
}}
json.dumps(result)
""")
import json
result = json.loads(result_json)
assert result["status_code"] == 200
assert result["text_type"] == "str"
assert result["content_type"] == "bytes"
assert result["headers_type"] == "dict"
assert result["ok"] is True
assert result["has_url"] is True
def test_xhr_all_methods(self, selenium, xhr_test_server):
"""Test all HTTP methods are available."""
result_json = selenium.run("""
import json
from pyodide.http import pyxhr
methods = ['get', 'post', 'put', 'delete', 'head', 'patch', 'options']
available_methods = []
for method in methods:
if hasattr(pyxhr, method) and callable(getattr(pyxhr, method)):
available_methods.append(method)
json.dumps(available_methods)
""")
import json
result = json.loads(result_json)
expected_methods = ["get", "post", "put", "delete", "head", "patch", "options"]
assert result == expected_methods
def test_xhr_not_in_browser(monkeypatch):
"""Test that _xhr_request raises RuntimeError when not in a browser environment."""
import pytest
# Mock the IN_PYODIDE flag to simulate non-browser environment
import pyodide.http
monkeypatch.setattr(pyodide.http, "IN_PYODIDE", False)
# Test that _xhr_request raises RuntimeError when called outside browser
with pytest.raises(
RuntimeError, match="XMLHttpRequest is only available in browser environments"
):
from pyodide.http import pyxhr
# This should raise RuntimeError when trying to make a request
pyxhr.get("http://test.com")
| TestPyxhr |
python | numba__numba | numba/core/types/npytypes.py | {
"start": 17961,
"end": 18284
} | class ____(Type):
"""
This is the type for `np.ndarray.flags`.
"""
def __init__(self, arytype):
self.array_type = arytype
name = "ArrayFlags({0})".format(self.array_type)
super(ArrayFlags, self).__init__(name)
@property
def key(self):
return self.array_type
| ArrayFlags |
python | django__django | tests/auth_tests/test_models.py | {
"start": 4155,
"end": 8391
} | class ____(TransactionTestCase):
available_apps = [
"auth_tests",
"django.contrib.auth",
"django.contrib.contenttypes",
]
def test_create_user(self):
email_lowercase = "normal@normal.com"
user = User.objects.create_user("user", email_lowercase)
self.assertEqual(user.email, email_lowercase)
self.assertEqual(user.username, "user")
self.assertFalse(user.has_usable_password())
def test_create_user_email_domain_normalize_rfc3696(self):
# According to RFC 3696 Section 3 the "@" symbol can be part of the
# local part of an email address.
returned = UserManager.normalize_email(r"Abc\@DEF@EXAMPLE.com")
self.assertEqual(returned, r"Abc\@DEF@example.com")
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email("normal@DOMAIN.COM")
self.assertEqual(returned, "normal@domain.com")
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email(r"email\ with_whitespace@D.COM")
self.assertEqual(returned, r"email\ with_whitespace@d.com")
def test_empty_username(self):
with self.assertRaisesMessage(ValueError, "The given username must be set"):
User.objects.create_user(username="")
def test_create_user_is_staff(self):
email = "normal@normal.com"
user = User.objects.create_user("user", email, is_staff=True)
self.assertEqual(user.email, email)
self.assertEqual(user.username, "user")
self.assertTrue(user.is_staff)
def test_create_super_user_raises_error_on_false_is_superuser(self):
with self.assertRaisesMessage(
ValueError, "Superuser must have is_superuser=True."
):
User.objects.create_superuser(
username="test",
email="test@test.com",
password="test",
is_superuser=False,
)
async def test_acreate_super_user_raises_error_on_false_is_superuser(self):
with self.assertRaisesMessage(
ValueError, "Superuser must have is_superuser=True."
):
await User.objects.acreate_superuser(
username="test",
email="test@test.com",
password="test",
is_superuser=False,
)
def test_create_superuser_raises_error_on_false_is_staff(self):
with self.assertRaisesMessage(ValueError, "Superuser must have is_staff=True."):
User.objects.create_superuser(
username="test",
email="test@test.com",
password="test",
is_staff=False,
)
async def test_acreate_superuser_raises_error_on_false_is_staff(self):
with self.assertRaisesMessage(ValueError, "Superuser must have is_staff=True."):
await User.objects.acreate_superuser(
username="test",
email="test@test.com",
password="test",
is_staff=False,
)
def test_runpython_manager_methods(self):
def forwards(apps, schema_editor):
UserModel = apps.get_model("auth", "User")
user = UserModel.objects.create_user("user1", password="secure")
self.assertIsInstance(user, UserModel)
operation = migrations.RunPython(forwards, migrations.RunPython.noop)
project_state = ProjectState()
project_state.add_model(ModelState.from_model(User))
project_state.add_model(ModelState.from_model(Group))
project_state.add_model(ModelState.from_model(Permission))
project_state.add_model(ModelState.from_model(ContentType))
new_state = project_state.clone()
with connection.schema_editor() as editor:
operation.state_forwards("test_manager_methods", new_state)
operation.database_forwards(
"test_manager_methods",
editor,
project_state,
new_state,
)
user = User.objects.get(username="user1")
self.assertTrue(user.check_password("secure"))
| UserManagerTestCase |
python | google__pytype | pytype_extensions/__init__.py | {
"start": 388,
"end": 1471
} | class ____(Protocol[T]):
"""Protocol that matches any `attrs` class (or instance thereof).
Can be used to match any `attrs` class. Example:
@attrs.define
class Foo:
x: str
y: int
@attrs.define
class Bar:
x: str
y: str
class Baz:
x: str
y: int
def foo(item: Attr):
pass
def bar(item: Attr[str]):
pass
def baz(item: Attr[Union[int, str]]):
pass
foo(Foo(x='yes', y=1)) # ok
foo(Bar(x='yes', y='no')) # ok
foo(Baz(x='yes', y=1)) # error, not a `attrs` class
bar(Foo(x='yes', y=1)) # error, has a non-str field
bar(Bar(x='yes', y='no')) # ok
bar(Baz(x='yes', y=1)) # error, not a `attrs` class
baz(Foo(x='yes', y=1)) # ok
baz(Bar(x='yes', y='no')) # ok
baz(Baz(x='yes', y=1)) # error, not a `attrs` class
The only way to identify an `attrs` class is to test for the presence of the
`__attrs_attrs__` member; that is what attrs.has uses:
https://github.com/python-attrs/attrs/blob/main/src/attr/_funcs.py#L290
"""
__attrs_attrs__: Tuple['attr.Attribute[T]', ...]
| Attrs |
python | facelessuser__soupsieve | tests/test_level4/test_default.py | {
"start": 52,
"end": 4650
} | class ____(util.TestCase):
"""Test default selectors."""
def test_default(self):
"""Test default."""
markup = """
<form>
<input type="radio" name="season" id="spring">
<label for="spring">Spring</label>
<input type="radio" name="season" id="summer" checked>
<label for="summer">Summer</label>
<input type="radio" name="season" id="fall">
<label for="fall">Fall</label>
<input type="radio" name="season" id="winter">
<label for="winter">Winter</label>
<select id="pet-select">
<option value="">--Please choose an option--</option>
<option id="dog" value="dog">Dog</option>
<option id="cat" value="cat">Cat</option>
<option id="hamster" value="hamster" selected>Hamster</option>
<option id="parrot" value="parrot">Parrot</option>
<option id="spider" value="spider">Spider</option>
<option id="goldfish" value="goldfish">Goldfish</option>
</select>
<input type="checkbox" name="enable" id="enable" checked>
<label for="enable">Enable</label>
<button type="button">
not default
</button>
<button id="d1" type="submit">
default1
</button>
<button id="d2" type="submit">
default2
</button>
</form>
<form>
<div>
<button id="d3" type="submit">
default3
</button>
</div>
<button id="d4" type="submit">
default4
</button>
</form>
<button id="d5" type="submit">
default4
</button>
"""
self.assert_selector(
markup,
":default",
['summer', 'd1', 'd3', 'hamster', 'enable'],
flags=util.HTML
)
def test_iframe(self):
"""Test with `iframe`."""
markup = """
<html>
<body>
<form>
<button id="d1" type="submit">default1</button>
</form>
<form>
<iframe>
<html>
<body>
<button id="d2" type="submit">default2</button>
</body>
</html>
</iframe>
<button id="d3" type="submit">default3</button>
</form>
<iframe>
<html>
<body>
<form>
<button id="d4" type="submit">default4</button>
</form>
</body>
</html>
</iframe>
</body>
</html>
"""
self.assert_selector(
markup,
":default",
['d1', 'd3', 'd4'],
flags=util.PYHTML
)
def test_nested_form(self):
"""
Test nested form.
This is technically invalid use of forms, but browsers will generally evaluate first in the nested forms.
"""
markup = """
<form>
<form>
<button id="d1" type="submit">
button1
</button>
</form>
<button id="d2" type="submit">
button2
</button>
</form>
"""
self.assert_selector(
markup,
":default",
['d1'],
flags=util.HTML
)
def test_default_cached(self):
"""
Test that we use the cached "default".
For the sake of coverage, we will do this impractical select
to ensure we reuse the cached default.
"""
markup = """
<form>
<form>
<button id="d1" type="submit">
button1
</button>
</form>
<button id="d2" type="submit">
button2
</button>
</form>
"""
self.assert_selector(
markup,
":default:default",
['d1'],
flags=util.HTML
)
def test_nested_form_fail(self):
"""
Test that the search for elements will bail after the first nested form.
You shouldn't nest forms, but if you do, when a parent form encounters a nested form,
we will bail evaluation like browsers do. We should see button 1 getting found for nested
form, but button 2 will not be found for parent form.
"""
markup = """
<form>
<form>
<span>what</span>
</form>
<button id="d2" type="submit">
button2
</button>
</form>
"""
self.assert_selector(
markup,
":default",
[],
flags=util.HTML
)
| TestDefault |
python | huggingface__transformers | src/transformers/generation/continuous_batching/cache_manager.py | {
"start": 962,
"end": 1814
} | class ____:
"""A class to represent a block managed by the block manager. We say that a block is complete when the physical KV
cache it points to is fully computed. A block can have a parent, which is the block that came before in the
sequence. Once a block is complete, it is given a hash, which takes into account the tokens ids of the block and
its parent's hash (if there is a parent)."""
def __init__(self, id_: int, parent_id: int | None) -> None:
self.id: int = id_
self.parent_id: int | None = parent_id
self.hash: int | None = None
self.ref_count: int = 1
def __repr__(self) -> str:
return f"Block(id={self.id}, parent_id={self.parent_id}, hash={self.hash}, ref_count={self.ref_count})"
@property
def is_complete(self) -> bool:
return self.hash is not None
| Block |
python | scikit-learn__scikit-learn | sklearn/externals/_arff.py | {
"start": 18158,
"end": 18217
} | class ____(_DataListMixin, DenseGeneratorData):
pass
| Data |
python | tornadoweb__tornado | tornado/test/queues_test.py | {
"start": 13114,
"end": 13981
} | class ____(AsyncTestCase):
@gen_test
def test_producer_consumer(self):
q = queues.Queue(maxsize=3) # type: queues.Queue[int]
history = []
# We don't yield between get() and task_done(), so get() must wait for
# the next tick. Otherwise we'd immediately call task_done and unblock
# join() before q.put() resumes, and we'd only process the first four
# items.
@gen.coroutine
def consumer():
while True:
history.append((yield q.get()))
q.task_done()
@gen.coroutine
def producer():
for item in range(10):
yield q.put(item)
consumer()
yield producer()
yield q.join()
self.assertEqual(list(range(10)), history)
if __name__ == "__main__":
unittest.main()
| ProducerConsumerTest |
python | getsentry__sentry | src/sentry/preprod/api/endpoints/preprod_artifact_rerun_analysis.py | {
"start": 972,
"end": 1133
} | class ____:
size_metrics_total_deleted: int = 0
size_comparisons_total_deleted: int = 0
files_total_deleted: int = 0
@region_silo_endpoint
| CleanupStats |
python | lepture__authlib | authlib/integrations/httpx_client/oauth1_client.py | {
"start": 3264,
"end": 4606
} | class ____(_OAuth1Client, httpx.Client):
auth_class = OAuth1Auth
def __init__(
self,
client_id,
client_secret=None,
token=None,
token_secret=None,
redirect_uri=None,
rsa_key=None,
verifier=None,
signature_method=SIGNATURE_HMAC_SHA1,
signature_type=SIGNATURE_TYPE_HEADER,
force_include_body=False,
**kwargs,
):
_client_kwargs = extract_client_kwargs(kwargs)
# app keyword was dropped!
app_value = _client_kwargs.pop("app", None)
if app_value is not None:
_client_kwargs["transport"] = httpx.WSGITransport(app=app_value)
httpx.Client.__init__(self, **_client_kwargs)
_OAuth1Client.__init__(
self,
self,
client_id=client_id,
client_secret=client_secret,
token=token,
token_secret=token_secret,
redirect_uri=redirect_uri,
rsa_key=rsa_key,
verifier=verifier,
signature_method=signature_method,
signature_type=signature_type,
force_include_body=force_include_body,
**kwargs,
)
@staticmethod
def handle_error(error_type, error_description):
raise OAuthError(error_type, error_description)
| OAuth1Client |
python | doocs__leetcode | solution/0400-0499/0420.Strong Password Checker/Solution.py | {
"start": 0,
"end": 1805
} | class ____:
def strongPasswordChecker(self, password: str) -> int:
def countTypes(s):
a = b = c = 0
for ch in s:
if ch.islower():
a = 1
elif ch.isupper():
b = 1
elif ch.isdigit():
c = 1
return a + b + c
types = countTypes(password)
n = len(password)
if n < 6:
return max(6 - n, 3 - types)
if n <= 20:
replace = cnt = 0
prev = '~'
for curr in password:
if curr == prev:
cnt += 1
else:
replace += cnt // 3
cnt = 1
prev = curr
replace += cnt // 3
return max(replace, 3 - types)
replace = cnt = 0
remove, remove2 = n - 20, 0
prev = '~'
for curr in password:
if curr == prev:
cnt += 1
else:
if remove > 0 and cnt >= 3:
if cnt % 3 == 0:
remove -= 1
replace -= 1
elif cnt % 3 == 1:
remove2 += 1
replace += cnt // 3
cnt = 1
prev = curr
if remove > 0 and cnt >= 3:
if cnt % 3 == 0:
remove -= 1
replace -= 1
elif cnt % 3 == 1:
remove2 += 1
replace += cnt // 3
use2 = min(replace, remove2, remove // 2)
replace -= use2
remove -= use2 * 2
use3 = min(replace, remove // 3)
replace -= use3
remove -= use3 * 3
return n - 20 + max(replace, 3 - types)
| Solution |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/tests/test_training_status.py | {
"start": 3622,
"end": 4244
} | class ____(unittest.TestCase):
def test_metadata_compare(self):
# Test write_stats
with self.assertLogs("mlagents.trainers", level="WARNING") as cm:
default_metadata = StatusMetaData()
version_statsmetadata = StatusMetaData(mlagents_version="test")
default_metadata.check_compatibility(version_statsmetadata)
torch_version_statsmetadata = StatusMetaData(torch_version="test")
default_metadata.check_compatibility(torch_version_statsmetadata)
# Assert that 2 warnings have been thrown
assert len(cm.output) == 2
| StatsMetaDataTest |
python | huggingface__transformers | tests/models/xmod/test_modeling_xmod.py | {
"start": 1526,
"end": 13816
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return XmodConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
default_language="en_XX",
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XmodModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = XmodModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = XmodForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = XmodForCausalLM(config=config).to(torch_device).eval()
# make sure that ids don't start with pad token
mask = input_ids.ne(config.pad_token_id).long()
input_ids = input_ids * mask
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
# make sure that ids don't start with pad token
mask = next_tokens.ne(config.pad_token_id).long()
next_tokens = next_tokens * mask
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XmodForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = XmodForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = XmodForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XmodForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| XmodModelTester |
python | davidhalter__parso | parso/utils.py | {
"start": 4714,
"end": 6620
} | class ____(_PythonVersionInfo):
def __gt__(self, other):
if isinstance(other, tuple):
if len(other) != 2:
raise ValueError("Can only compare to tuples of length 2.")
return (self.major, self.minor) > other
super().__gt__(other)
return (self.major, self.minor)
def __eq__(self, other):
if isinstance(other, tuple):
if len(other) != 2:
raise ValueError("Can only compare to tuples of length 2.")
return (self.major, self.minor) == other
super().__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def _parse_version(version) -> PythonVersionInfo:
match = re.match(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$', version)
if match is None:
raise ValueError('The given version is not in the right format. '
'Use something like "3.8" or "3".')
major = int(match.group(1))
minor = match.group(2)
if minor is None:
# Use the latest Python in case it's not exactly defined, because the
# grammars are typically backwards compatible?
if major == 2:
minor = "7"
elif major == 3:
minor = "6"
else:
raise NotImplementedError("Sorry, no support yet for those fancy new/old versions.")
minor = int(minor)
return PythonVersionInfo(major, minor)
def parse_version_string(version: str = None) -> PythonVersionInfo:
"""
Checks for a valid version number (e.g. `3.8` or `3.10.1` or `3`) and
returns a corresponding version info that is always two characters long in
decimal.
"""
if version is None:
version = '%s.%s' % sys.version_info[:2]
if not isinstance(version, str):
raise TypeError('version must be a string like "3.8"')
return _parse_version(version)
| PythonVersionInfo |
python | FactoryBoy__factory_boy | factory/fuzzy.py | {
"start": 3777,
"end": 4237
} | class ____(BaseFuzzyAttribute):
"""Random float within a given range."""
def __init__(self, low, high=None, precision=15):
if high is None:
high = low
low = 0
self.low = low
self.high = high
self.precision = precision
super().__init__()
def fuzz(self):
base = random.randgen.uniform(self.low, self.high)
return float(format(base, '.%dg' % self.precision))
| FuzzyFloat |
python | PyCQA__pylint | tests/functional/d/duplicate/duplicate_dict_literal_key.py | {
"start": 126,
"end": 546
} | class ____(Enum):
""" Sample Enum for testing duplicate keys"""
KEY = "key"
CORRECT_DICT = {
'tea': 'for two',
'two': 'for tea',
}
WRONG_WITH_ENUM = { # [duplicate-key]
MyEnum.KEY: "value 1",
MyEnum.KEY: "value 2",
}
WRONG_DICT = { # [duplicate-key]
'tea': 'for two',
'two': 'for tea',
'tea': 'time',
}
{1: b'a', 1: u'a'} # [duplicate-key]
{1: 1, 1.0: 2} # [duplicate-key]
| MyEnum |
python | kamyu104__LeetCode-Solutions | Python/trapping-rain-water.py | {
"start": 29,
"end": 560
} | class ____(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
result, left, right, level = 0, 0, len(height)-1, 0
while left < right:
if height[left] < height[right]:
lower = height[left]
left += 1
else:
lower = height[right]
right -= 1
level = max(level, lower)
result += level-lower
return result
# Time: O(n)
# Space: O(1)
| Solution |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 42407,
"end": 47653
} | class ____(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
trainable_position_encoding_kwargs_preprocessor = {"num_channels": 256, "index_dims": config.image_size**2}
trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(
config,
input_preprocessor=PerceiverImagePreprocessor(
config,
prep_type="conv1x1",
spatial_downsample=1,
out_channels=256,
position_encoding_type="trainable",
concat_or_add_pos="concat",
project_pos_dim=256,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor,
),
decoder=PerceiverClassificationDecoder(
config,
num_channels=config.d_latents,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
use_query_residual=True,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
pixel_values: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
>>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError("You cannot use both `inputs` and `pixel_values`")
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for image classification, for tasks such as ImageNet.
This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of
79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).
[`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="pixels"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
"""
)
| PerceiverForImageClassificationLearned |
python | catalyst-team__catalyst | examples/detection/models/ssd.py | {
"start": 2003,
"end": 6149
} | class ____(nn.Module):
def __init__(self, backbone="resnet18", num_classes=80):
"""
Source:
https://github.com/NVIDIA/DeepLearningExamples/blob/70fcb70ff4bc49cc723195b35cfa8d4ce94a7f76/PyTorch/Detection/SSD/src/model.py
Args:
backbone (str): model backbone to use
n_classes (int): number of classes to predict
"""
super().__init__()
self.feature_extractor = ResnetBackbone(backbone)
self.label_num = num_classes + 1 # +background class
self._build_additional_features(self.feature_extractor.out_channels)
self.num_defaults = [4, 6, 6, 6, 4, 4]
self.loc = []
self.conf = []
for nd, oc in zip(self.num_defaults, self.feature_extractor.out_channels):
self.loc.append(nn.Conv2d(oc, nd * 4, kernel_size=3, padding=1))
self.conf.append(
nn.Conv2d(oc, nd * self.label_num, kernel_size=3, padding=1)
)
self.loc = nn.ModuleList(self.loc)
self.conf = nn.ModuleList(self.conf)
self._init_weights()
def _build_additional_features(self, input_size):
self.additional_blocks = []
for i, (input_size, output_size, channels) in enumerate(
zip(input_size[:-1], input_size[1:], [256, 256, 128, 128, 128])
):
if i < 3:
layer = nn.Sequential(
nn.Conv2d(input_size, channels, kernel_size=1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(inplace=True),
nn.Conv2d(
channels,
output_size,
kernel_size=3,
padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(output_size),
nn.ReLU(inplace=True),
)
else:
layer = nn.Sequential(
nn.Conv2d(input_size, channels, kernel_size=1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(inplace=True),
nn.Conv2d(channels, output_size, kernel_size=3, bias=False),
nn.BatchNorm2d(output_size),
nn.ReLU(inplace=True),
)
self.additional_blocks.append(layer)
self.additional_blocks = nn.ModuleList(self.additional_blocks)
def _init_weights(self):
layers = [*self.additional_blocks, *self.loc, *self.conf]
for layer in layers:
for param in layer.parameters():
if param.dim() > 1:
nn.init.xavier_uniform_(param)
# Shape the classifier to the view of bboxes
def bbox_view(self, src, loc, conf):
ret = []
for s, l, c in zip(src, loc, conf):
# ret.append((l(s).view(s.size(0), 4, -1), c(s).view(s.size(0), self.label_num, -1)))
ret.append(
(l(s).view(s.size(0), -1, 4), c(s).view(s.size(0), -1, self.label_num))
)
locs, confs = list(zip(*ret))
# locs, confs = torch.cat(locs, 2).contiguous(), torch.cat(confs, 2).contiguous()
locs, confs = torch.cat(locs, 1).contiguous(), torch.cat(confs, 1).contiguous()
return locs, confs
    def forward(self, x):
        """
        Args:
            x (torch.Tensor): batch of data, expected shapes [B, 3, H, W]
        Returns:
            bbox locations (torch.Tensor) with shapes [B, A, 4],
            where B - batch size, A - num anchors
            class confidence logits (torch.Tensor) with shapes [B, A, N_CLASSES],
            where B - batch size, A - num anchors
        """
        # Backbone output seeds the multi-scale detection pyramid.
        x = self.feature_extractor(x)
        detection_feed = [x]
        # Each additional block shrinks the map further; keep every scale.
        for layer in self.additional_blocks:
            x = layer(x)
            detection_feed.append(x)
        # Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
        locs, confs = self.bbox_view(detection_feed, self.loc, self.conf)
        return locs, confs
| SingleShotDetector |
python | pytorch__pytorch | torch/nn/parallel/distributed.py | {
"start": 9235,
"end": 11401
} | class ____(JoinHook):
    def __init__(self, ddp, divide_by_initial_world_size):
        """Set config variables for internal usage."""
        # The hook is only meaningful when driven by a real DDP instance.
        assert isinstance(ddp, DistributedDataParallel), (
            "DDP join hook requires passing in a DistributedDataParallel "
            "instance as the state"
        )
        assert ddp.logger is not None
        # Record that uneven-input join was used (logger-side bookkeeping).
        ddp.logger._set_uneven_input_join()
        self.ddp = ddp
        # NOTE(review): presumably selects the gradient-averaging divisor while
        # some ranks have joined — confirm against DDP's allreduce logic.
        self.ddp._divide_by_initial_world_size = divide_by_initial_world_size
        super().__init__()
    def main_hook(self):
        """Shadow the DDP collective communication operations in the forward and backward passes."""
        ddp = self.ddp
        # Buckets are rebuilt only once during a training period
        ddp.reducer._rebuild_buckets()
        # Schedule a broadcast if we are syncing module buffers in the
        # forward pass
        # TODO: make DDP uneven inputs context manager support buffer
        # comm hook (https://github.com/pytorch/pytorch/issues/65436)
        ddp._check_and_sync_module_buffers()
        # Check if need to sync in the backward pass
        should_sync_backwards = ddp._check_global_requires_backward_grad_sync(
            is_joined_rank=True
        )
        # Forward parameter sync is disabled in the next iteration if we
        # are skipping gradient sync this iteration, so set
        # `require_forward_param_sync` accordingly
        ddp.require_forward_param_sync = should_sync_backwards
        if not should_sync_backwards:
            # Nothing to shadow: peers are not reducing gradients this step.
            return
        # Schedule one allreduce per gradient bucket to match the backward
        # pass allreduce
        ddp._match_all_reduce_for_bwd_pass()
        # Check if we need to allreduce locally unused parameters
        if ddp.find_unused_parameters:
            ddp._match_unused_params_allreduce()
        # Rebuilt parameters are pushed only once during a training period
        ddp.reducer._push_all_rebuilt_params()
def post_hook(self, is_last_joiner: bool):
"""Sync the final model to ensure that the model is the same across all processes."""
self.ddp._sync_final_model(is_last_joiner)
| _DDPJoinHook |
python | numpy__numpy | numpy/polynomial/tests/test_hermite.py | {
"start": 6116,
"end": 9970
} | class ____:
    def test_hermint(self):
        """Exercise hermint: argument validation, integration constants ``k``,
        lower bound ``lbnd``, scaling ``scl``, and that m-fold integration
        equals m single integrations."""
        # check exceptions
        assert_raises(TypeError, herm.hermint, [0], .5)
        assert_raises(ValueError, herm.hermint, [0], -1)
        assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
        assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
        assert_raises(ValueError, herm.hermint, [0], scl=[0])
        assert_raises(TypeError, herm.hermint, [0], axis=.5)
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0] * (i - 2) + [1]
            res = herm.hermint([0], m=i, k=k)
            assert_almost_equal(res, [0, .5])
        # check single integration with integration constant
        for i in range(5):
            scl = i + 1
            pol = [0] * i + [1]
            tgt = [i] + [0] * i + [1 / scl]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i])
            res = herm.herm2poly(hermint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5):
            scl = i + 1
            pol = [0] * i + [1]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(herm.hermval(-1, hermint), i)
        # check single integration with integration constant and scaling
        for i in range(5):
            scl = i + 1
            pol = [0] * i + [1]
            tgt = [i] + [0] * i + [2 / scl]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
            res = herm.herm2poly(hermint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1)
                res = herm.hermint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1, k=[k])
                res = herm.hermint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
                res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
                res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
def test_hermint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T
res = herm.hermint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c) for c in c2d])
res = herm.hermint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])
res = herm.hermint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
| TestIntegral |
python | huggingface__transformers | src/transformers/models/layoutlmv2/modeling_layoutlmv2.py | {
"start": 24801,
"end": 35770
} | class ____(LayoutLMv2PreTrainedModel):
    def __init__(self, config):
        # The visual backbone needs detectron2; fail fast if it is missing.
        requires_backends(self, "detectron2")
        super().__init__(config)
        self.config = config
        self.has_visual_segment_embedding = config.has_visual_segment_embedding
        self.embeddings = LayoutLMv2Embeddings(config)
        # Visual branch: CNN backbone, then a linear projection into hidden_size.
        self.visual = LayoutLMv2VisualBackbone(config)
        self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
        if self.has_visual_segment_embedding:
            # Single learned vector added to every visual token.
            self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
        self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.encoder = LayoutLMv2Encoder(config)
        self.pooler = LayoutLMv2Pooler(config)
        # Initialize weights and apply final processing
        self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if inputs_embeds is None:
inputs_embeds = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(self.visual(image))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape):
visual_bbox_x = torch.div(
torch.arange(
0,
1000 * (image_feature_pool_shape[1] + 1),
1000,
device=device,
dtype=bbox.dtype,
),
self.config.image_feature_pool_shape[1],
rounding_mode="floor",
)
visual_bbox_y = torch.div(
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[0] + 1),
1000,
device=device,
dtype=bbox.dtype,
),
self.config.image_feature_pool_shape[0],
rounding_mode="floor",
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, bbox.size(-1))
visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
return visual_bbox
def _get_input_shape(self, input_ids=None, inputs_embeds=None):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
return input_ids.size()
elif inputs_embeds is not None:
return inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        image: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        bbox (`torch.LongTensor` of shape `((batch_size, sequence_length), 4)`, *optional*):
            Bounding boxes of each input sequence tokens. Selected in the range `[0,
            config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
            format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
            y1) represents the position of the lower right corner.
        image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
            Batch of document images.
        Examples:
        ```python
        >>> from transformers import AutoProcessor, LayoutLMv2Model, set_seed
        >>> from PIL import Image
        >>> import torch
        >>> from datasets import load_dataset
        >>> set_seed(0)
        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
        >>> model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")
        >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
        >>> image = dataset["test"][0]["image"]
        >>> encoding = processor(image, return_tensors="pt")
        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> last_hidden_states.shape
        torch.Size([1, 342, 768])
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        input_shape = self._get_input_shape(input_ids, inputs_embeds)
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # One visual token per cell of the pooled feature map, appended after text.
        visual_shape = list(input_shape)
        visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
        visual_shape = torch.Size(visual_shape)
        # needs a new copy of input_shape for tracing. Otherwise wrong dimensions will occur
        final_shape = list(self._get_input_shape(input_ids, inputs_embeds))
        final_shape[1] += visual_shape[1]
        final_shape = torch.Size(final_shape)
        visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)
        final_bbox = torch.cat([bbox, visual_bbox], dim=1)
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        # Visual tokens are always attended to.
        visual_attention_mask = torch.ones(visual_shape, device=device)
        final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if position_ids is None:
            seq_length = input_shape[1]
            position_ids = self.embeddings.position_ids[:, :seq_length]
            position_ids = position_ids.expand(input_shape)
        # Visual tokens get their own 0..N-1 position ids.
        visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
            input_shape[0], 1
        )
        final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
        text_layout_emb = self._calc_text_embeddings(
            input_ids=input_ids,
            bbox=bbox,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
        )
        visual_emb = self._calc_img_embeddings(
            image=image,
            bbox=visual_bbox,
            position_ids=visual_position_ids,
        )
        final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
        # Standard additive mask: 0 where attended, large negative where masked.
        extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
        encoder_outputs = self.encoder(
            final_emb,
            extended_attention_mask,
            bbox=final_bbox,
            position_ids=final_position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@auto_docstring(
custom_intro="""
LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the
final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual
embeddings, e.g. for document image classification tasks such as the
[RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
"""
)
| LayoutLMv2Model |
python | squidfunk__mkdocs-material | material/plugins/social/layout.py | {
"start": 2161,
"end": 2342
} | class ____(Config):
x = Type(int, default = 0)
y = Type(int, default = 0)
# # -----------------------------------------------------------------------------
# Background
| Offset |
python | sphinx-doc__sphinx | sphinx/writers/manpage.py | {
"start": 2349,
"end": 15732
} | class ____(SphinxTranslator, manpage.Translator):
"""Custom man page translator."""
_docinfo: dict[str, Any] = {}
def __init__(self, document: nodes.document, builder: Builder) -> None:
super().__init__(document, builder)
# first title is the manpage title
self.section_level = -1
# docinfo set by man_pages config value
self._docinfo['title'] = self.settings.title
self._docinfo['subtitle'] = self.settings.subtitle
if self.settings.authors:
# don't set it if no author given
self._docinfo['author'] = self.settings.authors
self._docinfo['manual_section'] = self.settings.section
# docinfo set by other config values
self._docinfo['title_upper'] = self._docinfo['title'].upper()
if self.config.today:
self._docinfo['date'] = self.config.today
else:
today_fmt = self.config.today_fmt or _('%b %d, %Y')
self._docinfo['date'] = format_date(
today_fmt, language=self.config.language
)
self._docinfo['copyright'] = self.config.copyright
self._docinfo['version'] = self.config.version
self._docinfo['manual_group'] = self.config.project
# Overwrite admonition label translations with our own
for label, translation in admonitionlabels.items():
self.language.labels[label] = self.deunicode(translation)
# overwritten -- added quotes around all .TH arguments
def header(self) -> str:
tmpl = (
'.TH "%(title_upper)s" "%(manual_section)s"'
' "%(date)s" "%(version)s" "%(manual_group)s"\n'
)
if self._docinfo['subtitle']:
tmpl += '.SH NAME\n%(title)s \\- %(subtitle)s\n'
return tmpl % self._docinfo
    def visit_start_of_file(self, node: Element) -> None:
        pass
    def depart_start_of_file(self, node: Element) -> None:
        pass
    #############################################################
    # Domain-specific object descriptions
    #############################################################
    # Top-level nodes for descriptions
    ##################################
    # Object descriptions are rendered as man-page definition lists: the
    # signature becomes the term, the description body the definition.
    def visit_desc(self, node: nodes.definition_list) -> None:
        self.visit_definition_list(node)
    def depart_desc(self, node: nodes.definition_list) -> None:
        self.depart_definition_list(node)
    def visit_desc_signature(self, node: nodes.term) -> None:
        self.visit_definition_list_item(node)  # type: ignore[arg-type]
        self.visit_term(node)
    def depart_desc_signature(self, node: nodes.term) -> None:
        self.depart_term(node)
    def visit_desc_signature_line(self, node: Element) -> None:
        pass
    def depart_desc_signature_line(self, node: Element) -> None:
        self.body.append(' ')
    def visit_desc_content(self, node: nodes.definition) -> None:
        self.visit_definition(node)
    def depart_desc_content(self, node: nodes.definition) -> None:
        self.depart_definition(node)
    def visit_desc_inline(self, node: Element) -> None:
        pass
    def depart_desc_inline(self, node: Element) -> None:
        pass
    # Nodes for high-level structure in signatures
    ##############################################
    def visit_desc_name(self, node: Element) -> None:
        pass
    def depart_desc_name(self, node: Element) -> None:
        pass
    def visit_desc_addname(self, node: Element) -> None:
        pass
    def depart_desc_addname(self, node: Element) -> None:
        pass
    def visit_desc_type(self, node: Element) -> None:
        pass
    def depart_desc_type(self, node: Element) -> None:
        pass
    def visit_desc_returns(self, node: Element) -> None:
        self.body.append(' -> ')
    def depart_desc_returns(self, node: Element) -> None:
        pass
    def visit_desc_parameterlist(self, node: Element) -> None:
        self.body.append('(')
        self.first_param = 1
    def depart_desc_parameterlist(self, node: Element) -> None:
        self.body.append(')')
    def visit_desc_type_parameter_list(self, node: Element) -> None:
        self.body.append('[')
        self.first_param = 1
    def depart_desc_type_parameter_list(self, node: Element) -> None:
        self.body.append(']')
    def visit_desc_parameter(self, node: Element) -> None:
        # Comma-separate every parameter after the first.
        if not self.first_param:
            self.body.append(', ')
        else:
            self.first_param = 0
    def depart_desc_parameter(self, node: Element) -> None:
        pass
    def visit_desc_type_parameter(self, node: Element) -> None:
        self.visit_desc_parameter(node)
    def depart_desc_type_parameter(self, node: Element) -> None:
        self.depart_desc_parameter(node)
    def visit_desc_optional(self, node: Element) -> None:
        self.body.append('[')
    def depart_desc_optional(self, node: Element) -> None:
        self.body.append(']')
    def visit_desc_annotation(self, node: Element) -> None:
        pass
    def depart_desc_annotation(self, node: Element) -> None:
        pass
    ##############################################
    def visit_versionmodified(self, node: nodes.paragraph) -> None:
        self.visit_paragraph(node)
    def depart_versionmodified(self, node: nodes.paragraph) -> None:
        self.depart_paragraph(node)
    # overwritten -- don't make whole of term bold if it includes strong node
    def visit_term(self, node: nodes.term) -> None:
        if any(node.findall(nodes.strong)):
            self.body.append('\n')
        else:
            super().visit_term(node)
    # overwritten -- we don't want source comments to show up
    def visit_comment(self, node: Element) -> None:  # type: ignore[override]
        raise nodes.SkipNode
    # overwritten -- added ensure_eol()
    def visit_footnote(self, node: nodes.footnote) -> None:
        self.ensure_eol()
        super().visit_footnote(node)
    # overwritten -- handle footnotes rubric
    def visit_rubric(self, node: Element) -> None:
        self.ensure_eol()
        # A rubric that merely labels the footnotes becomes a proper .SH section.
        if len(node) == 1 and node.astext() in {'Footnotes', _('Footnotes')}:
            self.body.append('.SH ' + self.deunicode(node.astext()).upper() + '\n')
            raise nodes.SkipNode
        self.body.append('.sp\n')
    def depart_rubric(self, node: Element) -> None:
        self.body.append('\n')
    def visit_seealso(self, node: nodes.admonition) -> None:
        self.visit_admonition(node, 'seealso')
    def depart_seealso(self, node: nodes.admonition) -> None:
        self.depart_admonition(node)
    def visit_productionlist(self, node: Element) -> None:
        # Grammar productions are emitted verbatim (groff no-fill mode).
        self.ensure_eol()
        self.body.append('.sp\n.nf\n')
    def depart_productionlist(self, node: Element) -> None:
        self.body.append('\n.fi\n')
    def visit_production(self, node: Element) -> None:
        pass
    def depart_production(self, node: Element) -> None:
        pass
    # overwritten -- don't emit a warning for images
    def visit_image(self, node: Element) -> None:
        # man pages cannot render images; emit alt text (if any) plus a marker.
        if 'alt' in node.attributes:
            self.body.append(_('[image: %s]') % node['alt'] + '\n')
        self.body.append(_('[image]') + '\n')
        raise nodes.SkipNode
    # overwritten -- don't visit inner marked up nodes
    def visit_reference(self, node: nodes.reference) -> None:
        uri = node.get('refuri', '')
        is_safe_to_click = uri.startswith(('mailto:', 'http:', 'https:', 'ftp:'))
        if is_safe_to_click:
            # OSC 8 link start (using groff's device control directive).
            self.body.append(rf"\X'tty: link {uri}'")
        self.body.append(self.defs['reference'][0])
        # avoid repeating escaping code... fine since
        # visit_Text calls astext() and only works on that afterwards
        self.visit_Text(node)  # type: ignore[arg-type]
        self.body.append(self.defs['reference'][1])
        if uri and not uri.startswith('#'):
            # if configured, put the URL after the link
            if self.config.man_show_urls and node.astext() != uri:
                uri = uri.removeprefix('mailto:')
                self.body.extend([
                    ' <',
                    self.defs['strong'][0],
                    uri,
                    self.defs['strong'][1],
                    '>',
                ])
        if is_safe_to_click:
            # OSC 8 link end.
            self.body.append(r"\X'tty: link'")
        raise nodes.SkipNode
    def visit_number_reference(self, node: Element) -> None:
        # Render only the reference title; man pages have no hyperlinked numbers.
        text = nodes.Text(node.get('title', '#'))
        self.visit_Text(text)
        raise nodes.SkipNode
    def visit_centered(self, node: Element) -> None:
        self.ensure_eol()
        self.body.append('.sp\n.ce\n')
    def depart_centered(self, node: Element) -> None:
        self.body.append('\n.ce 0\n')
    def visit_compact_paragraph(self, node: Element) -> None:
        pass
    def depart_compact_paragraph(self, node: Element) -> None:
        pass
    def visit_download_reference(self, node: Element) -> None:
        pass
    def depart_download_reference(self, node: Element) -> None:
        pass
    # Structures with no man-page representation are skipped entirely.
    def visit_toctree(self, node: Element) -> None:
        raise nodes.SkipNode
    def visit_index(self, node: Element) -> None:
        raise nodes.SkipNode
    def visit_tabular_col_spec(self, node: Element) -> None:
        raise nodes.SkipNode
    def visit_glossary(self, node: Element) -> None:
        pass
    def depart_glossary(self, node: Element) -> None:
        pass
def visit_acks(self, node: Element) -> None:
bullet_list = cast('nodes.bullet_list', node[0])
list_items = cast('Iterable[nodes.list_item]', bullet_list)
self.ensure_eol()
bullet_list = cast('nodes.bullet_list', node[0])
list_items = cast('Iterable[nodes.list_item]', bullet_list)
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n')
raise nodes.SkipNode
    def visit_hlist(self, node: nodes.bullet_list) -> None:
        self.visit_bullet_list(node)
    def depart_hlist(self, node: nodes.bullet_list) -> None:
        self.depart_bullet_list(node)
    def visit_hlistcol(self, node: Element) -> None:
        pass
    def depart_hlistcol(self, node: Element) -> None:
        pass
    def visit_literal_emphasis(self, node: nodes.emphasis) -> None:
        return self.visit_emphasis(node)
    def depart_literal_emphasis(self, node: nodes.emphasis) -> None:
        return self.depart_emphasis(node)
    def visit_literal_strong(self, node: nodes.strong) -> None:
        return self.visit_strong(node)
    def depart_literal_strong(self, node: nodes.strong) -> None:
        return self.depart_strong(node)
    def visit_abbreviation(self, node: Element) -> None:
        pass
    def depart_abbreviation(self, node: Element) -> None:
        pass
    def visit_manpage(self, node: nodes.strong) -> None:
        return self.visit_strong(node)
    def depart_manpage(self, node: nodes.strong) -> None:
        return self.depart_strong(node)
    # overwritten: handle section titles better than in 0.6 release
    def visit_caption(self, node: nodes.caption) -> None:
        # Literal-block captions get a paragraph break instead of a heading.
        if (
            isinstance(node.parent, nodes.container)
            and node.parent.get('literal_block')
        ):  # fmt: skip
            self.body.append('.sp\n')
        else:
            super().visit_caption(node)
    def depart_caption(self, node: nodes.caption) -> None:
        if (
            isinstance(node.parent, nodes.container)
            and node.parent.get('literal_block')
        ):  # fmt: skip
            self.body.append('\n')
        else:
            super().depart_caption(node)
    # overwritten: handle section titles better than in 0.6 release
    def visit_title(self, node: nodes.title) -> None:
        if isinstance(node.parent, addnodes.seealso):
            self.body.append('.IP "')
            return None
        elif isinstance(node.parent, nodes.section):
            if self.section_level == 0:
                # skip the document title
                raise nodes.SkipNode
            elif self.section_level == 1:
                # Top-level sections become uppercase .SH headings.
                self.body.append(f'.SH {self.deunicode(node.astext().upper())}\n')
                raise nodes.SkipNode
        return super().visit_title(node)
    def depart_title(self, node: nodes.title) -> None:
        if isinstance(node.parent, addnodes.seealso):
            self.body.append('"\n')
            return None
        return super().depart_title(node)
    def visit_raw(self, node: Element) -> None:
        # Pass through raw content targeted at the manpage format; drop the rest.
        if 'manpage' in node.get('format', '').split():
            self.body.append(node.astext())
        raise nodes.SkipNode
    def visit_meta(self, node: Element) -> None:
        raise nodes.SkipNode
    def visit_inline(self, node: Element) -> None:
        pass
    def depart_inline(self, node: Element) -> None:
        pass
    # Math cannot be typeset in man output; emit contents centered/as-is.
    def visit_math(self, node: nodes.math) -> None:
        pass
    def depart_math(self, node: nodes.math) -> None:
        pass
    def visit_math_block(self, node: nodes.math_block) -> None:
        self.visit_centered(node)
    def depart_math_block(self, node: nodes.math_block) -> None:
        self.depart_centered(node)
| ManualPageTranslator |
python | GoogleCloudPlatform__python-docs-samples | run/django/polls/test_polls.py | {
"start": 647,
"end": 870
} | class ____(TestCase):
def test_index_view(self: PollViewTests) -> None:
response = self.client.get("/")
assert response.status_code == 200
assert "Hello, world" in str(response.content)
| PollViewTests |
python | pypa__warehouse | tests/unit/accounts/test_core.py | {
"start": 2642,
"end": 3760
} | class ____:
    @pytest.mark.parametrize(
        ("identity", "flag", "orgs", "expected"),
        [
            (False, True, [], False),  # Unauth'd always have no access
            (False, False, [], False),  # Unauth'd always have no access
            (True, False, [], True),  # Flag allows all authenticated users
            (True, True, [], False),  # Flag blocks all authenticated users without orgs
            (
                True,
                True,
                [pretend.stub()],
                True,
            ),  # Flag allows users with organizations
        ],
    )
    def test_organization_access(self, db_session, identity, flag, orgs, expected):
        """_organization_access: anonymous requests are always denied; with
        the flag enabled, authenticated users need at least one organization."""
        # Anonymous requests carry no user object at all.
        user = None if not identity else UserFactory()
        request = pretend.stub(
            identity=UserContext(user, None),
            # Stubbed organization service returns the parametrized org list.
            find_service=lambda interface, context=None: pretend.stub(
                get_organizations_by_user=lambda x: orgs
            ),
            flags=pretend.stub(enabled=lambda flag_name: flag),
        )
        assert expected == accounts._organization_access(request)
| TestOrganizationAccess |
python | huggingface__transformers | src/transformers/models/olmo2/modular_olmo2.py | {
"start": 9609,
"end": 12273
} | class ____(OlmoAttention):
    def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None):
        super().__init__(config, layer_idx=layer_idx)
        # OLMo2 difference from base OLMo: RMSNorm over the *full* projected
        # q/k width (num_heads * head_dim), applied before splitting into heads.
        self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
        self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Self-attention with RMSNorm applied to the q/k projections before
        the per-head reshape (the OLMo2-specific behavior)."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Normalize the full q/k projections, then split into heads.
        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)
        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Dispatch to the configured attention backend (eager by default).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        # Merge heads back and project to the model width.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
# The OLMo2 layers are identical to those of the OLMo model except:
# - RMSNorm is used instead of standard layer norm.
# - Norm is applied after attention/feedforward rather than before.
| Olmo2Attention |
python | matplotlib__matplotlib | lib/matplotlib/widgets.py | {
"start": 89152,
"end": 104331
} | class ____(_SelectorWidget):
"""
Visually select a min/max range on a single axis and call a function with
those values.
To guarantee that the selector remains responsive, keep a reference to it.
In order to turn off the SpanSelector, set ``span_selector.active`` to
False. To turn it back on, set it to True.
Press and release events triggered at the same coordinates outside the
selection will clear the selector, except when
``ignore_event_outside=True``.
Parameters
----------
ax : `~matplotlib.axes.Axes`
onselect : callable with signature ``func(min: float, max: float)``
A callback function that is called after a release event and the
selection is created, changed or removed.
direction : {"horizontal", "vertical"}
The direction along which to draw the span selector.
minspan : float, default: 0
If selection is less than or equal to *minspan*, the selection is
removed (when already existing) or cancelled.
useblit : bool, default: False
If True, use the backend-dependent blitting features for faster
canvas updates. See the tutorial :ref:`blitting` for details.
props : dict, default: {'facecolor': 'red', 'alpha': 0.5}
Dictionary of `.Patch` properties.
onmove_callback : callable with signature ``func(min: float, max: float)``, optional
Called on mouse move while the span is being selected.
interactive : bool, default: False
Whether to draw a set of handles that allow interaction with the
widget after it is drawn.
button : `.MouseButton` or list of `.MouseButton`, default: all buttons
The mouse buttons which activate the span selector.
handle_props : dict, default: None
Properties of the handle lines at the edges of the span. Only used
when *interactive* is True. See `.Line2D` for valid properties.
grab_range : float, default: 10
Distance in pixels within which the interactive tool handles can be activated.
state_modifier_keys : dict, optional
Keyboard modifiers which affect the widget's behavior. Values
amend the defaults, which are:
- "clear": Clear the current shape, default: "escape".
drag_from_anywhere : bool, default: False
If `True`, the widget can be moved by clicking anywhere within its bounds.
ignore_event_outside : bool, default: False
If `True`, the event triggered outside the span selector will be ignored.
snap_values : 1D array-like, optional
Snap the selector edges to the given values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import matplotlib.widgets as mwidgets
>>> fig, ax = plt.subplots()
>>> ax.plot([1, 2, 3], [10, 50, 100])
>>> def onselect(vmin, vmax):
... print(vmin, vmax)
>>> span = mwidgets.SpanSelector(ax, onselect, 'horizontal',
... props=dict(facecolor='blue', alpha=0.5))
>>> fig.show()
See also: :doc:`/gallery/widgets/span_selector`
"""
def __init__(self, ax, onselect, direction, *, minspan=0, useblit=False,
props=None, onmove_callback=None, interactive=False,
button=None, handle_props=None, grab_range=10,
state_modifier_keys=None, drag_from_anywhere=False,
ignore_event_outside=False, snap_values=None):
if state_modifier_keys is None:
state_modifier_keys = dict(clear='escape',
square='not-applicable',
center='not-applicable',
rotate='not-applicable')
super().__init__(ax, onselect, useblit=useblit, button=button,
state_modifier_keys=state_modifier_keys)
if props is None:
props = dict(facecolor='red', alpha=0.5)
props['animated'] = self.useblit
self.direction = direction
self._extents_on_press = None
self.snap_values = snap_values
self.onmove_callback = onmove_callback
self.minspan = minspan
self.grab_range = grab_range
self._interactive = interactive
self._edge_handles = None
self.drag_from_anywhere = drag_from_anywhere
self.ignore_event_outside = ignore_event_outside
self.new_axes(ax, _props=props, _init=True)
# Setup handles
self._handle_props = {
'color': props.get('facecolor', 'r'),
**cbook.normalize_kwargs(handle_props, Line2D)}
if self._interactive:
self._edge_order = ['min', 'max']
self._setup_edge_handles(self._handle_props)
self._active_handle = None
def new_axes(self, ax, *, _props=None, _init=False):
"""Set SpanSelector to operate on a new Axes."""
reconnect = False
if _init or self.canvas is not ax.get_figure(root=True).canvas:
if self.canvas is not None:
self.disconnect_events()
reconnect = True
self.ax = ax
if reconnect:
self.connect_default_events()
# Reset
self._selection_completed = False
if self.direction == 'horizontal':
trans = ax.get_xaxis_transform()
w, h = 0, 1
else:
trans = ax.get_yaxis_transform()
w, h = 1, 0
rect_artist = Rectangle((0, 0), w, h, transform=trans, visible=False)
if _props is not None:
rect_artist.update(_props)
elif self._selection_artist is not None:
rect_artist.update_from(self._selection_artist)
self.ax.add_patch(rect_artist)
self._selection_artist = rect_artist
def _setup_edge_handles(self, props):
# Define initial position using the axis bounds to keep the same bounds
if self.direction == 'horizontal':
positions = self.ax.get_xbound()
else:
positions = self.ax.get_ybound()
self._edge_handles = ToolLineHandles(self.ax, positions,
direction=self.direction,
line_props=props,
useblit=self.useblit)
@property
def _handles_artists(self):
if self._edge_handles is not None:
return self._edge_handles.artists
else:
return ()
def _set_span_cursor(self, *, enabled):
"""Update the canvas cursor based on direction of the selector."""
if enabled:
cursor = (backend_tools.Cursors.RESIZE_HORIZONTAL
if self.direction == 'horizontal' else
backend_tools.Cursors.RESIZE_VERTICAL)
else:
cursor = backend_tools.Cursors.POINTER
self._set_cursor(cursor)
def connect_default_events(self):
# docstring inherited
super().connect_default_events()
if getattr(self, '_interactive', False):
self.connect_event('motion_notify_event', self._hover)
def _press(self, event):
"""Button press event handler."""
self._set_span_cursor(enabled=True)
if self._interactive and self._selection_artist.get_visible():
self._set_active_handle(event)
else:
self._active_handle = None
if self._active_handle is None or not self._interactive:
# Clear previous rectangle before drawing new rectangle.
self.update()
xdata, ydata = self._get_data_coords(event)
v = xdata if self.direction == 'horizontal' else ydata
if self._active_handle is None and not self.ignore_event_outside:
# when the press event outside the span, we initially set the
# visibility to False and extents to (v, v)
# update will be called when setting the extents
self._visible = False
self._set_extents((v, v))
# We need to set the visibility back, so the span selector will be
# drawn when necessary (span width > 0)
self._visible = True
else:
self.set_visible(True)
return False
@property
def direction(self):
"""Direction of the span selector: 'vertical' or 'horizontal'."""
return self._direction
@direction.setter
def direction(self, direction):
"""Set the direction of the span selector."""
_api.check_in_list(['horizontal', 'vertical'], direction=direction)
if hasattr(self, '_direction') and direction != self._direction:
# remove previous artists
self._selection_artist.remove()
if self._interactive:
self._edge_handles.remove()
self._direction = direction
self.new_axes(self.ax)
if self._interactive:
self._setup_edge_handles(self._handle_props)
else:
self._direction = direction
def _release(self, event):
"""Button release event handler."""
self._set_span_cursor(enabled=False)
if not self._interactive:
self._selection_artist.set_visible(False)
if (self._active_handle is None and self._selection_completed and
self.ignore_event_outside):
return
vmin, vmax = self.extents
span = vmax - vmin
if span <= self.minspan:
# Remove span and set self._selection_completed = False
self.set_visible(False)
if self._selection_completed:
# Call onselect, only when the span is already existing
self.onselect(vmin, vmax)
self._selection_completed = False
else:
self.onselect(vmin, vmax)
self._selection_completed = True
self.update()
self._active_handle = None
return False
def _hover(self, event):
"""Update the canvas cursor if it's over a handle."""
if self.ignore(event):
return
if self._active_handle is not None or not self._selection_completed:
# Do nothing if button is pressed and a handle is active, which may
# occur with drag_from_anywhere=True.
# Do nothing if selection is not completed, which occurs when
# a selector has been cleared
return
_, e_dist = self._edge_handles.closest(event.x, event.y)
self._set_span_cursor(enabled=e_dist <= self.grab_range)
def _onmove(self, event):
"""Motion notify event handler."""
xdata, ydata = self._get_data_coords(event)
if self.direction == 'horizontal':
v = xdata
vpress = self._eventpress.xdata
else:
v = ydata
vpress = self._eventpress.ydata
# move existing span
# When "dragging from anywhere", `self._active_handle` is set to 'C'
# (match notation used in the RectangleSelector)
if self._active_handle == 'C' and self._extents_on_press is not None:
vmin, vmax = self._extents_on_press
dv = v - vpress
vmin += dv
vmax += dv
# resize an existing shape
elif self._active_handle and self._active_handle != 'C':
vmin, vmax = self._extents_on_press
if self._active_handle == 'min':
vmin = v
else:
vmax = v
# new shape
else:
# Don't create a new span if there is already one when
# ignore_event_outside=True
if self.ignore_event_outside and self._selection_completed:
return
vmin, vmax = vpress, v
if vmin > vmax:
vmin, vmax = vmax, vmin
self._set_extents((vmin, vmax))
if self.onmove_callback is not None:
self.onmove_callback(vmin, vmax)
return False
def _draw_shape(self, vmin, vmax):
if vmin > vmax:
vmin, vmax = vmax, vmin
if self.direction == 'horizontal':
self._selection_artist.set_x(vmin)
self._selection_artist.set_width(vmax - vmin)
else:
self._selection_artist.set_y(vmin)
self._selection_artist.set_height(vmax - vmin)
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event."""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
# Prioritise center handle over other handles
# Use 'C' to match the notation used in the RectangleSelector
if 'move' in self._state:
self._active_handle = 'C'
elif e_dist > self.grab_range:
# Not close to any handles
self._active_handle = None
if self.drag_from_anywhere and self._contains(event):
# Check if we've clicked inside the region
self._active_handle = 'C'
self._extents_on_press = self.extents
else:
self._active_handle = None
return
else:
# Closest to an edge handle
self._active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
self._extents_on_press = self.extents
def _contains(self, event):
"""Return True if event is within the patch."""
return self._selection_artist.contains(event, radius=0)[0]
@staticmethod
def _snap(values, snap_values):
"""Snap values to a given array values (snap_values)."""
# take into account machine precision
eps = np.min(np.abs(np.diff(snap_values))) * 1e-12
return tuple(
snap_values[np.abs(snap_values - v + np.sign(v) * eps).argmin()]
for v in values)
@property
def extents(self):
"""
(float, float)
The values, in data coordinates, for the start and end points of the current
selection. If there is no selection then the start and end values will be
the same.
"""
if self.direction == 'horizontal':
vmin = self._selection_artist.get_x()
vmax = vmin + self._selection_artist.get_width()
else:
vmin = self._selection_artist.get_y()
vmax = vmin + self._selection_artist.get_height()
return vmin, vmax
@extents.setter
def extents(self, extents):
self._set_extents(extents)
self._selection_completed = True
def _set_extents(self, extents):
# Update displayed shape
if self.snap_values is not None:
extents = tuple(self._snap(extents, self.snap_values))
self._draw_shape(*extents)
if self._interactive:
# Update displayed handles
self._edge_handles.set_data(self.extents)
self.set_visible(self._visible)
self.update()
| SpanSelector |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 3451,
"end": 3682
} | class ____(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
| CorrectNotFittedError |
python | kamyu104__LeetCode-Solutions | Python/find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k.py | {
"start": 32,
"end": 381
} | class ____(object):
def findMinFibonacciNumbers(self, k):
"""
:type k: int
:rtype: int
"""
result, a, b = 0, 1, 1
while b <= k:
b, a = a+b, b
while k:
if a <= k:
k -= a
result += 1
a, b = b-a, a
return result
| Solution |
python | doocs__leetcode | solution/2200-2299/2275.Largest Combination With Bitwise AND Greater Than Zero/Solution.py | {
"start": 0,
"end": 233
} | class ____:
def largestCombination(self, candidates: List[int]) -> int:
ans = 0
for i in range(max(candidates).bit_length()):
ans = max(ans, sum(x >> i & 1 for x in candidates))
return ans
| Solution |
python | ray-project__ray | python/ray/dashboard/modules/reporter/gpu_providers.py | {
"start": 448,
"end": 562
} | class ____(enum.Enum):
"""Enum for GPU provider types."""
NVIDIA = "nvidia"
AMD = "amd"
| GpuProviderType |
python | python__mypy | mypy/nodes.py | {
"start": 51724,
"end": 52487
} | class ____(Statement):
__slots__ = ("body", "is_unreachable")
__match_args__ = ("body", "is_unreachable")
def __init__(self, body: list[Statement], *, is_unreachable: bool = False) -> None:
super().__init__()
self.body = body
# True if we can determine that this block is not executed during semantic
# analysis. For example, this applies to blocks that are protected by
# something like "if PY3:" when using Python 2. However, some code is
# only considered unreachable during type checking and this is not true
# in those cases.
self.is_unreachable = is_unreachable
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_block(self)
# Statements
| Block |
python | encode__django-rest-framework | tests/test_negotiation.py | {
"start": 753,
"end": 3218
} | class ____(TestCase):
def setUp(self):
self.renderers = [MockJSONRenderer(), MockHTMLRenderer(), MockOpenAPIRenderer()]
self.negotiator = DefaultContentNegotiation()
def select_renderer(self, request):
return self.negotiator.select_renderer(request, self.renderers)
def test_client_without_accept_use_renderer(self):
request = Request(factory.get('/'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
assert accepted_media_type == 'application/json'
def test_client_underspecifies_accept_use_renderer(self):
request = Request(factory.get('/', HTTP_ACCEPT='*/*'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
assert accepted_media_type == 'application/json'
def test_client_overspecifies_accept_use_client(self):
request = Request(factory.get('/', HTTP_ACCEPT='application/json; indent=8'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
assert accepted_media_type == 'application/json; indent=8'
def test_client_specifies_parameter(self):
request = Request(factory.get('/', HTTP_ACCEPT='application/openapi+json;version=2.0'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
assert accepted_media_type == 'application/openapi+json;version=2.0'
assert accepted_renderer.format == 'swagger'
def test_match_is_false_if_main_types_not_match(self):
mediatype = _MediaType('test_1')
another_mediatype = _MediaType('test_2')
assert mediatype.match(another_mediatype) is False
def test_mediatype_match_is_false_if_keys_not_match(self):
mediatype = _MediaType(';test_param=foo')
another_mediatype = _MediaType(';test_param=bar')
assert mediatype.match(another_mediatype) is False
def test_mediatype_precedence_with_wildcard_subtype(self):
mediatype = _MediaType('test/*')
assert mediatype.precedence == 1
def test_mediatype_string_representation(self):
mediatype = _MediaType('test/*; foo=bar')
assert str(mediatype) == 'test/*; foo=bar'
def test_raise_error_if_no_suitable_renderers_found(self):
class MockRenderer:
format = 'xml'
renderers = [MockRenderer()]
with pytest.raises(Http404):
self.negotiator.filter_renderers(renderers, format='json')
| TestAcceptedMediaType |
python | Textualize__textual | tests/test_focus.py | {
"start": 298,
"end": 380
} | class ____(Widget, can_focus=False, can_focus_children=False):
pass
| NonFocusable |
python | keras-team__keras | keras/src/metrics/metrics_utils.py | {
"start": 995,
"end": 1537
} | class ____(Enum):
"""Type of AUC Curve (ROC or PR)."""
ROC = "ROC"
PR = "PR"
PRGAIN = "PRGAIN"
@staticmethod
def from_str(key):
if key in ("pr", "PR"):
return AUCCurve.PR
elif key in ("roc", "ROC"):
return AUCCurve.ROC
elif key in ("prgain", "PRGAIN"):
return AUCCurve.PRGAIN
else:
raise ValueError(
f'Invalid AUC curve value: "{key}". '
'Expected values are ["PR", "ROC", "PRGAIN"]'
)
| AUCCurve |
python | pytorch__pytorch | torch/_dynamo/variables/lists.py | {
"start": 63868,
"end": 65846
} | class ____(IteratorVariable):
# only needed for isinstance(..., range_iterator) to work
_nonvar_fields = {
"iter_obj",
}
def __init__(
self, start: int, stop: int, step: int, len_: int, **kwargs: Any
) -> None:
super().__init__(**kwargs)
self.start = start
self.stop = stop
self.step = step
self.len = len_
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if name == "__next__":
return self.next_variable(tx)
elif name == "__iter__":
return self
return super().call_method(tx, name, args, kwargs)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
if self.python_type() is range_iterator:
ri = iter(range(0))
return ConstantVariable(hasattr(ri, name))
return super().call_obj_hasattr(tx, name)
def next_variable(self, tx: "InstructionTranslator") -> VariableTracker:
if self.len <= 0:
raise_observed_exception(StopIteration, tx)
self.len -= 1
current = self.start
self.start += self.step
return ConstantVariable.create(current)
def python_type(self) -> type:
return range_iterator
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.append_output(codegen.create_load_python_module(range)) # type: ignore[arg-type]
)
codegen.append_output(codegen.create_load_const(self.start))
codegen.append_output(codegen.create_load_const(self.stop))
codegen.append_output(codegen.create_load_const(self.step))
codegen.extend_output(create_call_function(3, False))
codegen.append_output(create_instruction("GET_ITER"))
| RangeIteratorVariable |
python | apache__airflow | providers/http/tests/unit/http/hooks/test_http.py | {
"start": 2775,
"end": 27513
} | class ____:
"""Test get, post and raise_for_status"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="http_default", conn_type="http", host="test:8080/", extra='{"bearer": "test"}'
)
)
create_connection_without_db(
Connection(conn_id="http_conn_without_bearer", conn_type="http", host="test.com", port=1234)
)
create_connection_without_db(
Connection(
conn_id="http_conn_with_user_pwd",
conn_type="http",
host="test.com",
login="username",
password="pass",
)
)
def setup_method(self):
import requests_mock
session = requests.Session()
adapter = requests_mock.Adapter()
session.mount("mock", adapter)
self.get_hook = HttpHook(method="GET")
self.get_lowercase_hook = HttpHook(method="get")
self.post_hook = HttpHook(method="POST")
def test_raise_for_status_with_200(self, requests_mock):
requests_mock.get(
"http://test:8080/v1/test", status_code=200, text='{"status":{"status": 200}}', reason="OK"
)
resp = self.get_hook.run("v1/test")
assert resp.text == '{"status":{"status": 200}}'
def test_get_request_with_port(self, requests_mock):
from requests.exceptions import MissingSchema
requests_mock.get(
"http://test.com:1234/some/endpoint",
status_code=200,
text='{"status":{"status": 200}}',
reason="OK",
)
expected_url = "http://test.com:1234/some/endpoint"
get_hook = HttpHook(method="GET", http_conn_id="http_conn_without_bearer")
for endpoint in ["some/endpoint", "/some/endpoint"]:
with contextlib.suppress(MissingSchema):
get_hook.run(endpoint)
assert requests_mock.call_count == 1
assert requests_mock.last_request.url == expected_url
assert requests_mock.last_request.method == "GET"
requests_mock.reset()
def test_get_request_do_not_raise_for_status_if_check_response_is_false(self, requests_mock):
requests_mock.get(
"http://test:8080/v1/test",
status_code=404,
text='{"status":{"status": 404}}',
reason="Bad request",
)
resp = self.get_hook.run("v1/test", extra_options={"check_response": False})
assert resp.text == '{"status":{"status": 404}}'
# need to check last
@pytest.mark.parametrize("setup_connections_with_extras", [{"check_response": False}], indirect=True)
def test_get_request_do_not_raise_for_status_if_check_response_is_false_in_connection(
self, setup_connections_with_extras, requests_mock
):
requests_mock.get(
"http://test:8080/v1/test",
status_code=404,
text='{"status":{"status": 404}}',
reason="Bad request",
)
get_hook = HttpHook(method="GET", http_conn_id="http_conn_with_extras")
resp = get_hook.run("v1/test")
assert resp.text == '{"status":{"status": 404}}'
def test_hook_contains_header_from_extra_field(self):
conn = self.get_hook.get_conn()
assert dict(conn.headers, **{"bearer": "test"}) == conn.headers
assert conn.headers.get("bearer") == "test"
@pytest.mark.parametrize(
"setup_connections_with_extras", [{"bearer": "test", "max_redirects": 3}], indirect=True
)
def test_hook_ignore_max_redirects_from_extra_field_as_header(self, setup_connections_with_extras):
get_hook = HttpHook(method="GET", http_conn_id="http_conn_with_extras")
conn = get_hook.get_conn()
assert dict(conn.headers, **{"bearer": "test", "max_redirects": 3}) != conn.headers
assert conn.headers.get("bearer") == "test"
assert conn.headers.get("allow_redirects") is None
assert conn.proxies == {}
assert conn.stream is False
assert conn.verify is True
assert conn.cert is None
assert conn.max_redirects == 3
assert conn.trust_env is True
@pytest.mark.parametrize(
"setup_connections_with_extras",
[{"bearer": "test", "proxies": {"http": "http://proxy:80", "https": "https://proxy:80"}}],
indirect=True,
)
def test_hook_ignore_proxies_from_extra_field_as_header(self, setup_connections_with_extras):
get_hook = HttpHook(method="GET", http_conn_id="http_conn_with_extras")
conn = get_hook.get_conn()
assert (
dict(
conn.headers,
**{"bearer": "test", "proxies": {"http": "http://proxy:80", "https": "https://proxy:80"}},
)
!= conn.headers
)
assert conn.headers.get("bearer") == "test"
assert conn.headers.get("proxies") is None
assert conn.proxies == {"http": "http://proxy:80", "https": "https://proxy:80"}
assert conn.stream is False
assert conn.verify is True
assert conn.cert is None
assert conn.max_redirects == DEFAULT_REDIRECT_LIMIT
assert conn.trust_env is True
@pytest.mark.parametrize(
"setup_connections_with_extras", [{"bearer": "test", "verify": False}], indirect=True
)
def test_hook_ignore_verify_from_extra_field_as_header(self, setup_connections_with_extras):
get_hook = HttpHook(method="GET", http_conn_id="http_conn_with_extras")
conn = get_hook.get_conn()
assert dict(conn.headers, **{"bearer": "test", "verify": False}) != conn.headers
assert conn.headers.get("bearer") == "test"
assert conn.headers.get("verify") is None
assert conn.proxies == {}
assert conn.stream is False
assert conn.verify is False
assert conn.cert is None
assert conn.max_redirects == DEFAULT_REDIRECT_LIMIT
assert conn.trust_env is True
@pytest.mark.parametrize(
"setup_connections_with_extras",
[{"bearer": "test", "cert": "cert.crt", "stream": True}],
indirect=True,
)
def test_hook_ignore_cert_from_extra_field_as_header(self, setup_connections_with_extras):
get_hook = HttpHook(method="GET", http_conn_id="http_conn_with_extras")
conn = get_hook.get_conn()
assert dict(conn.headers, **{"bearer": "test", "cert": "cert.crt", "stream": True}) != conn.headers
assert conn.headers.get("bearer") == "test"
assert conn.headers.get("cert") is None
assert conn.proxies == {}
assert conn.stream is True
assert conn.verify is True
assert conn.cert == "cert.crt"
assert conn.max_redirects == DEFAULT_REDIRECT_LIMIT
assert conn.trust_env is True
@pytest.mark.parametrize(
"setup_connections_with_extras", [{"bearer": "test", "trust_env": False}], indirect=True
)
def test_hook_ignore_trust_env_from_extra_field_as_header(self, setup_connections_with_extras):
get_hook = HttpHook(method="GET", http_conn_id="http_conn_with_extras")
conn = get_hook.get_conn()
assert dict(conn.headers, **{"bearer": "test", "trust_env": False}) != conn.headers
assert conn.headers.get("bearer") == "test"
assert conn.headers.get("cert") is None
assert conn.proxies == {}
assert conn.stream is False
assert conn.verify is True
assert conn.cert is None
assert conn.max_redirects == DEFAULT_REDIRECT_LIMIT
assert conn.trust_env is False
def test_hook_with_method_in_lowercase(self, requests_mock):
from requests.exceptions import InvalidURL, MissingSchema
requests_mock.get(
"http://test.com:1234/v1/test?test%20params",
status_code=200,
text='{"status":{"status": 200}}',
reason="OK",
)
get_lowercase_hook = HttpHook(method="get", http_conn_id="http_conn_without_bearer")
data = "test params"
with contextlib.suppress(MissingSchema, InvalidURL):
get_lowercase_hook.run("v1/test", data=data)
assert requests_mock.call_count == 1
assert requests_mock.last_request.url == "http://test.com:1234/v1/test?test%20params"
assert requests_mock.last_request.method == "GET"
def test_hook_uses_provided_header(self):
conn = self.get_hook.get_conn(headers={"bearer": "newT0k3n"})
assert conn.headers.get("bearer") == "newT0k3n"
def test_hook_has_no_header_from_extra(self):
self.get_hook.http_conn_id = "http_conn_without_bearer"
conn = self.get_hook.get_conn()
assert conn.headers.get("bearer") is None
def test_hooks_header_from_extra_is_overridden(self):
conn = self.get_hook.get_conn(headers={"bearer": "newT0k3n"})
assert conn.headers.get("bearer") == "newT0k3n"
def test_post_request(self, requests_mock):
requests_mock.post(
"http://test:8080/v1/test", status_code=200, text='{"status":{"status": 200}}', reason="OK"
)
resp = self.post_hook.run("v1/test")
assert resp.status_code == 200
def test_post_request_with_error_code(self, requests_mock):
requests_mock.post(
"http://test:8080/v1/test",
status_code=418,
text='{"status":{"status": 418}}',
reason="I'm a teapot",
)
with pytest.raises(AirflowException):
self.post_hook.run("v1/test")
def test_post_request_do_not_raise_for_status_if_check_response_is_false(self, requests_mock):
requests_mock.post(
"http://test:8080/v1/test",
status_code=418,
text='{"status":{"status": 418}}',
reason="I'm a teapot",
)
resp = self.post_hook.run("v1/test", extra_options={"check_response": False})
assert resp.status_code == 418
def test_post_request_raises_error_when_redirects_with_max_redirects_set_to_0(self, requests_mock):
requests_mock.post(
"http://test:8080/v1/test",
status_code=302,
headers={"Location": "http://test:8080/v1/redirected"},
)
requests_mock.post(
"http://test:8080/v1/redirected",
status_code=200,
text='{"message": "OK"}',
)
with pytest.raises(requests.exceptions.TooManyRedirects) as err:
self.post_hook.run("v1/test", extra_options={"max_redirects": 0})
assert str(err.value) == "Exceeded 0 redirects."
history = requests_mock.request_history
assert len(history) == 1
assert history[0].url == "http://test:8080/v1/test"
assert history[0].method == "POST"
@pytest.mark.parametrize(
"setup_connections_with_extras", [{"bearer": "test", "check_response": False}], indirect=True
)
def test_post_request_do_not_raise_for_status_if_check_response_is_false_within_extra(
self, setup_connections_with_extras, requests_mock
):
requests_mock.post(
"http://test:8080/v1/test",
status_code=418,
text='{"status":{"status": 418}}',
reason="I'm a teapot",
)
post_hook = HttpHook(method="POST", http_conn_id="http_conn_with_extras")
resp = post_hook.run("v1/test")
assert resp.status_code == 418
@mock.patch("requests.Session.send")
def test_retry_on_conn_error(self, mock_session_send):
retry_args = dict(
wait=tenacity.wait_none(),
stop=tenacity.stop_after_attempt(7),
retry=tenacity.retry_if_exception_type(requests.exceptions.ConnectionError),
)
def send_and_raise(unused_request, **kwargs):
raise requests.exceptions.ConnectionError
mock_session_send.side_effect = send_and_raise
# The job failed for some reason
with pytest.raises(tenacity.RetryError):
self.get_hook.run_with_advanced_retry(endpoint="v1/test", _retry_args=retry_args)
assert self.get_hook._retry_obj.stop.max_attempt_number == mock_session_send.call_count
def test_run_with_advanced_retry(self, requests_mock):
requests_mock.get("http://test:8080/v1/test", status_code=200, reason="OK")
retry_args = dict(
wait=tenacity.wait_none(),
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(Exception),
reraise=True,
)
response = self.get_hook.run_with_advanced_retry(endpoint="v1/test", _retry_args=retry_args)
assert isinstance(response, requests.Response)
def test_header_from_extra_and_run_method_are_merged(self):
def run_and_return(unused_session, prepped_request, unused_extra_options, **kwargs):
return prepped_request
# The job failed for some reason
with mock.patch(
"airflow.providers.http.hooks.http.HttpHook.run_and_check", side_effect=run_and_return
):
prepared_request = self.get_hook.run("v1/test", headers={"some_other_header": "test"})
actual = dict(prepared_request.headers)
assert actual.get("bearer") == "test"
assert actual.get("some_other_header") == "test"
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_http_connection(self, mock_get_connection):
conn = Connection(conn_id="http_default", conn_type="http", host="localhost", schema="http")
mock_get_connection.return_value = conn
hook = HttpHook()
hook.get_conn({})
assert hook.base_url == "http://localhost"
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_https_connection(self, mock_get_connection):
conn = Connection(conn_id="http_default", conn_type="http", host="localhost", schema="https")
mock_get_connection.return_value = conn
hook = HttpHook()
hook.get_conn({})
assert hook.base_url == "https://localhost"
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_https_connection_port(self, mock_get_connection):
conn = Connection(
conn_id="http_default", conn_type="http", host="https://localhost", schema="https", port=8080
)
mock_get_connection.return_value = conn
hook = HttpHook()
hook.get_conn({})
assert hook.base_url == "https://localhost:8080"
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_host_encoded_http_connection(self, mock_get_connection):
conn = Connection(conn_id="http_default", conn_type="http", host="http://localhost")
mock_get_connection.return_value = conn
hook = HttpHook()
hook.get_conn({})
assert hook.base_url == "http://localhost"
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_host_encoded_https_connection(self, mock_get_connection):
conn = Connection(conn_id="http_default", conn_type="http", host="https://localhost")
mock_get_connection.return_value = conn
hook = HttpHook()
hook.get_conn({})
assert hook.base_url == "https://localhost"
def test_method_converted_to_uppercase_when_created_in_lowercase(self):
assert self.get_lowercase_hook.method == "GET"
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_connection_without_host(self, mock_get_connection):
conn = Connection(conn_id="http_default", conn_type="http")
mock_get_connection.return_value = conn
hook = HttpHook()
hook.get_conn({})
assert hook.base_url == "http://"
@pytest.mark.parametrize("method", ["GET", "POST"])
def test_json_request(self, method, requests_mock):
obj1 = {"a": 1, "b": "abc", "c": [1, 2, {"d": 10}]}
def match_obj1(request):
return request.json() == obj1
requests_mock.request(method=method, url="//test:8080/v1/test", additional_matcher=match_obj1)
# will raise NoMockAddress exception if obj1 != request.json()
HttpHook(method=method).run("v1/test", json=obj1)
@mock.patch("requests.Session.send")
def test_verify_set_to_true_by_default(self, mock_session_send):
self.get_hook.run("/some/endpoint")
mock_session_send.assert_called_once_with(
mock.ANY,
allow_redirects=True,
cert=None,
proxies={},
stream=False,
timeout=None,
verify=True,
)
@mock.patch("requests.Session.send")
@mock.patch.dict(os.environ, {"REQUESTS_CA_BUNDLE": "/tmp/test.crt"})
def test_requests_ca_bundle_env_var(self, mock_session_send):
self.get_hook.run("/some/endpoint")
mock_session_send.assert_called_once_with(
mock.ANY,
allow_redirects=True,
cert=None,
proxies={},
stream=False,
timeout=None,
verify="/tmp/test.crt",
)
@mock.patch("requests.Session.send")
@mock.patch.dict(os.environ, {"REQUESTS_CA_BUNDLE": "/tmp/test.crt"})
def test_verify_respects_requests_ca_bundle_env_var(self, mock_session_send):
self.get_hook.run("/some/endpoint", extra_options={"verify": True})
mock_session_send.assert_called_once_with(
mock.ANY,
allow_redirects=True,
cert=None,
proxies={},
stream=False,
timeout=None,
verify="/tmp/test.crt",
)
@mock.patch("requests.Session.send")
@mock.patch.dict(os.environ, {"REQUESTS_CA_BUNDLE": "/tmp/test.crt"})
def test_verify_false_parameter_overwrites_set_requests_ca_bundle_env_var(self, mock_session_send):
self.get_hook.run("/some/endpoint", extra_options={"verify": False})
mock_session_send.assert_called_once_with(
mock.ANY,
allow_redirects=True,
cert=None,
proxies={},
stream=False,
timeout=None,
verify=False,
)
def test_connection_success(self, requests_mock):
requests_mock.get("http://test:8080", status_code=200, json={"status": {"status": 200}}, reason="OK")
status, msg = self.get_hook.test_connection()
assert status is True
assert msg == "Connection successfully tested"
def test_connection_failure(self, requests_mock):
requests_mock.get(
"http://test:8080", status_code=500, json={"message": "internal server error"}, reason="NOT_OK"
)
status, msg = self.get_hook.test_connection()
assert status is False
assert msg == "500:NOT_OK"
@mock.patch("requests.auth.AuthBase.__init__")
def test_loginless_custom_auth_initialized_with_no_args(self, auth):
auth.return_value = None
hook = HttpHook("GET", "http_default", AuthBase)
hook.get_conn()
auth.assert_called_once_with()
@mock.patch("requests.auth.AuthBase.__init__")
def test_loginless_custom_auth_initialized_with_args(self, auth):
auth.return_value = None
auth_with_args = functools.partial(AuthBase, "test_arg")
hook = HttpHook("GET", "http_default", auth_with_args)
hook.get_conn()
auth.assert_called_once_with("test_arg")
@mock.patch("requests.auth.HTTPBasicAuth.__init__")
def test_login_password_basic_auth_initialized(self, auth):
auth.return_value = None
hook = HttpHook("GET", "http_conn_with_user_pwd", HTTPBasicAuth)
hook.get_conn()
auth.assert_called_once_with("username", "pass")
@mock.patch("requests.auth.HTTPBasicAuth.__init__")
def test_default_auth_not_initialized(self, auth):
auth.return_value = None
hook = HttpHook("GET", "http_default")
hook.get_conn()
auth.assert_not_called()
def test_keep_alive_enabled(self):
with (
mock.patch(
"requests_toolbelt.adapters.socket_options.TCPKeepAliveAdapter.send"
) as tcp_keep_alive_send,
mock.patch("requests.adapters.HTTPAdapter.send") as http_send,
):
hook = HttpHook(method="GET")
response = Response()
response.status_code = HTTPStatus.OK
tcp_keep_alive_send.return_value = response
http_send.return_value = response
hook.run("v1/test")
tcp_keep_alive_send.assert_called()
http_send.assert_not_called()
def test_keep_alive_disabled(self):
with (
mock.patch(
"requests_toolbelt.adapters.socket_options.TCPKeepAliveAdapter.send"
) as tcp_keep_alive_send,
mock.patch("requests.adapters.HTTPAdapter.send") as http_send,
):
hook = HttpHook(method="GET", tcp_keep_alive=False)
response = Response()
response.status_code = HTTPStatus.OK
tcp_keep_alive_send.return_value = response
http_send.return_value = response
hook.run("v1/test")
tcp_keep_alive_send.assert_not_called()
http_send.assert_called()
@pytest.mark.parametrize(
("base_url", "endpoint", "expected_url"),
[
pytest.param("https://example.org", "/v1/test", "https://example.org/v1/test", id="both-set"),
pytest.param("", "http://foo/bar/v1/test", "http://foo/bar/v1/test", id="only-endpoint"),
],
)
def test_url_from_endpoint(self, base_url: str, endpoint: str, expected_url: str):
hook = HttpHook()
hook.base_url = base_url
hook._base_url_initialized = True # Mark as initialized to prevent lazy loading
assert hook.url_from_endpoint(endpoint) == expected_url
@mock.patch("airflow.providers.http.hooks.http.HttpHook.get_connection")
def test_url_from_endpoint_lazy_initialization(self, mock_get_connection):
"""Test that url_from_endpoint works without calling get_conn() first."""
# Mock the connection
mock_connection = mock.MagicMock()
mock_connection.host = "foo.bar.com"
mock_connection.schema = "https"
mock_connection.port = None
mock_get_connection.return_value = mock_connection
# Create hook without calling get_conn() and verify that base_url is not initialized
hook = HttpHook(http_conn_id="test_conn")
assert not hook._base_url_initialized
# This should work now with our fix and verify the URL was constructed correctly
url = hook.url_from_endpoint("baz/bop")
assert url == "https://foo.bar.com/baz/bop"
# Verify get_connection was called and and verify that base_url is now initialized
mock_get_connection.assert_called_once_with("test_conn")
assert hook._base_url_initialized
def test_custom_adapter(self):
custom_adapter = HTTPAdapter()
hook = HttpHook(method="GET", adapter=custom_adapter)
session = hook.get_conn()
assert isinstance(session.adapters["http://"], type(custom_adapter)), (
"Custom HTTP adapter not correctly mounted"
)
assert isinstance(session.adapters["https://"], type(custom_adapter)), (
"Custom HTTPS adapter not correctly mounted"
)
def test_process_extra_options_from_connection(self):
extra_options = {}
proxy = {"http": "http://proxy:80", "https": "https://proxy:80"}
conn = get_airflow_connection_with_extra(
extra={
"bearer": "test",
"stream": True,
"cert": "cert.crt",
"proxies": proxy,
"timeout": 60,
"verify": False,
"allow_redirects": False,
"max_redirects": 3,
"trust_env": False,
}
)()
actual_conn_extra, actual_merged_extra = _process_extra_options_from_connection(
conn=conn, extra_options=extra_options
)
assert actual_merged_extra == {
"cert": "cert.crt",
"stream": True,
"proxy": proxy,
"timeout": 60,
"verify_ssl": False,
"allow_redirects": False,
"max_redirects": 3,
"trust_env": False,
}
assert actual_conn_extra == {"bearer": "test"}
assert extra_options == {}
| TestHttpHook |
python | pytorch__pytorch | torch/nn/modules/transformer.py | {
"start": 28090,
"end": 42072
} | class ____(Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This TransformerEncoderLayer implements the original architecture described
in the `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_ paper. The
intent of this layer is as a reference implementation for foundational understanding
and thus it contains only limited features relative to newer Transformer architectures.
Given the fast pace of innovation in transformer-like architectures, we recommend
exploring this `tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_
to build efficient layers from building blocks in core or using higher
level libraries from the `PyTorch Ecosystem <https://landscape.pytorch.org/>`_.
TransformerEncoderLayer can handle either traditional torch.tensor inputs,
or Nested Tensor inputs. Derived classes are expected to similarly accept
both input formats. (Not all combinations of inputs are currently
supported by TransformerEncoderLayer while Nested Tensor is in prototype
state.)
If you are implementing a custom layer, you may derive it either from
the Module or TransformerEncoderLayer class. If your custom layer
supports both torch.Tensors and Nested Tensors inputs, make its
implementation a derived class of TransformerEncoderLayer. If your custom
Layer supports only torch.Tensor inputs, derive its implementation from
Module.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, layer norm is done prior to attention and feedforward
operations, respectively. Otherwise it's done after. Default: ``False`` (after).
bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
bias. Default: ``True``.
Examples:
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
Alternatively, when ``batch_first`` is ``True``:
>>> encoder_layer = nn.TransformerEncoderLayer(
... d_model=512, nhead=8, batch_first=True
... )
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
Fast path:
forward() will use a special optimized implementation described in
`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
conditions are met:
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
argument ``requires_grad``
- training is disabled (using ``.eval()``)
- batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
- activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu``
- at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
- if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
nor ``src_key_padding_mask`` is passed
- the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
unless the caller has manually modified one without modifying the other)
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
passed for ``src`` to represent padding more efficiently than using a padding
mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
returned, and an additional speedup proportional to the fraction of the input that
is padding can be expected.
.. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
https://arxiv.org/abs/2205.14135
"""
__constants__ = ["norm_first"]
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: str | Callable[[Tensor], Tensor] = F.relu,
layer_norm_eps: float = 1e-5,
batch_first: bool = False,
norm_first: bool = False,
bias: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.self_attn = MultiheadAttention(
d_model,
nhead,
dropout=dropout,
bias=bias,
batch_first=batch_first,
**factory_kwargs,
)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
self.norm_first = norm_first
# pyrefly: ignore [bad-argument-type]
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
# pyrefly: ignore [bad-argument-type]
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
activation = _get_activation_fn(activation)
# We can't test self.activation in forward() in TorchScript,
# so stash some information about it instead.
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
self.activation_relu_or_gelu = 1
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
self.activation_relu_or_gelu = 2
else:
self.activation_relu_or_gelu = 0
self.activation = activation
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "activation"):
self.activation = F.relu
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
is_causal: bool = False,
) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
is_causal: If specified, applies a causal mask as ``src mask``.
Default: ``False``.
Warning:
``is_causal`` provides a hint that ``src_mask`` is the
causal mask. Providing incorrect hints can result in
incorrect execution, including forward and backward
compatibility.
Shape:
see the docs in :class:`~torch.nn.Transformer`.
"""
src_key_padding_mask = F._canonical_mask(
mask=src_key_padding_mask,
mask_name="src_key_padding_mask",
other_type=F._none_or_dtype(src_mask),
other_name="src_mask",
target_type=src.dtype,
)
src_mask = F._canonical_mask(
mask=src_mask,
mask_name="src_mask",
other_type=None,
other_name="",
target_type=src.dtype,
check_other=False,
)
is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
why_not_sparsity_fast_path = ""
if not is_fastpath_enabled:
why_not_sparsity_fast_path = (
"torch.backends.mha.get_fastpath_enabled() was not True"
)
elif src.dim() != 3:
why_not_sparsity_fast_path = (
f"input not batched; expected src.dim() of 3 but got {src.dim()}"
)
elif self.training:
why_not_sparsity_fast_path = "training is enabled"
elif not self.self_attn.batch_first:
why_not_sparsity_fast_path = "self_attn.batch_first was not True"
elif self.self_attn.in_proj_bias is None:
why_not_sparsity_fast_path = "self_attn was passed bias=False"
elif not self.self_attn._qkv_same_embed_dim:
why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
elif not self.activation_relu_or_gelu:
why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
elif self.norm1.eps != self.norm2.eps:
why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
elif src.is_nested and (
src_key_padding_mask is not None or src_mask is not None
):
why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input"
elif self.self_attn.num_heads % 2 == 1:
why_not_sparsity_fast_path = "num_head is odd"
elif torch.is_autocast_enabled():
why_not_sparsity_fast_path = "autocast is enabled"
elif any(
len(getattr(m, "_forward_hooks", {}))
+ len(getattr(m, "_forward_pre_hooks", {}))
for m in self.modules()
):
why_not_sparsity_fast_path = "forward pre-/hooks are attached to the module"
if not why_not_sparsity_fast_path:
tensor_args = (
src,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
_supported_device_type = [
"cpu",
"cuda",
torch.utils.backend_registration._privateuse1_backend_name,
]
if torch.overrides.has_torch_function(tensor_args):
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
elif not all(
(x.device.type in _supported_device_type) for x in tensor_args
):
why_not_sparsity_fast_path = (
"some Tensor argument's device is neither one of "
f"{_supported_device_type}"
)
elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
why_not_sparsity_fast_path = (
"grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad"
)
if not why_not_sparsity_fast_path:
merged_mask, mask_type = self.self_attn.merge_masks(
src_mask, src_key_padding_mask, src
)
return torch._transformer_encoder_layer_fwd(
src,
self.self_attn.embed_dim,
self.self_attn.num_heads,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.activation_relu_or_gelu == 2,
self.norm_first,
self.norm1.eps,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
merged_mask,
mask_type,
)
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
x = src
if self.norm_first:
x = x + self._sa_block(
self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal
)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(
x
+ self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal)
)
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(
self,
x: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
is_causal: bool = False,
) -> Tensor:
x = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False,
is_causal=is_causal,
)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
| TransformerEncoderLayer |
python | pypa__hatch | tests/backend/builders/test_wheel.py | {
"start": 10523,
"end": 11846
} | class ____:
def test_default(self, isolation):
builder = WheelBuilder(str(isolation))
assert builder.config.core_metadata_constructor is builder.config.core_metadata_constructor
assert builder.config.core_metadata_constructor is get_core_metadata_constructors()[DEFAULT_METADATA_VERSION]
def test_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"core-metadata-version": 42}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
TypeError, match="Field `tool.hatch.build.targets.wheel.core-metadata-version` must be a string"
):
_ = builder.config.core_metadata_constructor
def test_unknown(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"core-metadata-version": "9000"}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
ValueError,
match=(
f"Unknown metadata version `9000` for field `tool.hatch.build.targets.wheel.core-metadata-version`. "
f"Available: {', '.join(sorted(get_core_metadata_constructors()))}"
),
):
_ = builder.config.core_metadata_constructor
| TestCoreMetadataConstructor |
python | modin-project__modin | modin/config/pubsub.py | {
"start": 3142,
"end": 3768
} | class ____(NamedTuple):
"""
Class for config data manipulating of exact type.
Parameters
----------
decode : callable
Callable to decode config value from the raw data.
normalize : callable
Callable to bring different config value variations to
the single form.
verify : callable
Callable to check that config value satisfies given config
type requirements.
help : str
Class description string.
"""
decode: Callable[[str], object]
normalize: Callable[[object], object]
verify: Callable[[object], bool]
help: str
| TypeDescriptor |
python | huggingface__transformers | src/transformers/models/ernie/modeling_ernie.py | {
"start": 20838,
"end": 21450
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = ErniePredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
| ErnieLMPredictionHead |
python | jd__tenacity | tenacity/__init__.py | {
"start": 16095,
"end": 17141
} | class ____(BaseRetrying):
"""Retrying controller."""
def __call__(
self,
fn: t.Callable[..., WrappedFnReturnT],
*args: t.Any,
**kwargs: t.Any,
) -> WrappedFnReturnT:
self.begin()
retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
while True:
do = self.iter(retry_state=retry_state)
if isinstance(do, DoAttempt):
try:
result = fn(*args, **kwargs)
except BaseException: # noqa: B902
retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
else:
retry_state.set_result(result)
elif isinstance(do, DoSleep):
retry_state.prepare_for_next_attempt()
self.sleep(do)
else:
return do # type: ignore[no-any-return]
if sys.version_info >= (3, 9):
FutureGenericT = futures.Future[t.Any]
else:
FutureGenericT = futures.Future
| Retrying |
python | doocs__leetcode | solution/2100-2199/2144.Minimum Cost of Buying Candies With Discount/Solution.py | {
"start": 0,
"end": 142
} | class ____:
def minimumCost(self, cost: List[int]) -> int:
cost.sort(reverse=True)
return sum(cost) - sum(cost[2::3])
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/models.py | {
"start": 5104,
"end": 9501
} | class ____(TimeStampedModel):
"""
Addons project configuration.
Store all the configuration for each of the addons.
Everything is enabled by default.
"""
# Model history
history = ExtraHistoricalRecords()
project = models.OneToOneField(
"Project",
related_name="addons",
null=True,
blank=True,
on_delete=models.CASCADE,
)
enabled = models.BooleanField(
default=True,
help_text="Enable/Disable all the addons on this project",
)
options_root_selector = models.CharField(
null=True,
blank=True,
max_length=128,
help_text="CSS selector for the main content of the page. Leave it blank for auto-detect.",
)
# Whether or not load addons library when the requested page is embedded (e.g. inside an iframe)
# https://github.com/readthedocs/addons/pull/415
options_load_when_embedded = models.BooleanField(default=False)
options_base_version = models.ForeignKey(
"builds.Version",
verbose_name=_("Base version to compare against (eg. DocDiff, File Tree Diff)"),
null=True,
blank=True,
on_delete=models.SET_NULL,
)
# Analytics
# NOTE: we keep analytics disabled by default to save resources.
# Most projects won't be taking a look at these numbers.
analytics_enabled = models.BooleanField(default=False)
# Docdiff
doc_diff_enabled = models.BooleanField(default=True)
doc_diff_show_additions = models.BooleanField(default=True)
doc_diff_show_deletions = models.BooleanField(default=True)
# EthicalAds
ethicalads_enabled = models.BooleanField(default=True)
# File Tree Diff
filetreediff_enabled = models.BooleanField(default=True)
filetreediff_ignored_files = models.JSONField(
help_text=_("List of ignored files. One per line."),
null=True,
blank=True,
)
# Flyout
flyout_enabled = models.BooleanField(
default=True,
verbose_name=_("Enabled"),
)
flyout_sorting = models.CharField(
verbose_name=_("Sorting of versions"),
choices=ADDONS_FLYOUT_SORTING_CHOICES,
default=ADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE,
max_length=64,
)
flyout_sorting_custom_pattern = models.CharField(
max_length=32,
default=None,
null=True,
blank=True,
verbose_name=_("Custom version sorting pattern"),
help_text="Sorting pattern supported by BumpVer "
'(<a href="https://github.com/mbarkhau/bumpver#pattern-examples">See examples</a>)',
)
flyout_sorting_latest_stable_at_beginning = models.BooleanField(
verbose_name=_("Show <code>latest</code> and <code>stable</code> at the beginning"),
default=True,
)
flyout_position = models.CharField(
choices=ADDONS_FLYOUT_POSITION_CHOICES,
max_length=64,
default=None, # ``None`` means use the default (theme override if present or Read the Docs default)
null=True,
blank=True,
verbose_name=_("Position"),
)
# Hotkeys
hotkeys_enabled = models.BooleanField(default=True)
# Search
search_enabled = models.BooleanField(default=True)
search_default_filter = models.CharField(null=True, blank=True, max_length=128)
# User JavaScript File
customscript_enabled = models.BooleanField(default=False)
# This is a user-defined file that will be injected at serve time by our
# Cloudflare Worker if defined
customscript_src = models.CharField(
max_length=512,
null=True,
blank=True,
help_text="URL to a JavaScript file to inject at serve time",
)
# Notifications
notifications_enabled = models.BooleanField(default=True)
notifications_show_on_latest = models.BooleanField(default=True)
notifications_show_on_non_stable = models.BooleanField(default=True)
notifications_show_on_external = models.BooleanField(default=True)
# Link Previews
linkpreviews_enabled = models.BooleanField(default=False)
linkpreviews_selector = models.CharField(
null=True,
blank=True,
max_length=128,
help_text="CSS selector to select links you want enabled for link previews. Leave it blank for auto-detect all links in your main page content.",
)
| AddonsConfig |
python | pytorch__pytorch | torch/distributed/_tools/mem_tracker.py | {
"start": 1494,
"end": 2327
} | class ____(_RefType):
"""
An enum to define memory reference types, categorizing tensors based on their usage within a model.
- PARAM: Tensors registered as nn.Parameter within modules.
- BUFFER: Tensors registered as nn.Buffer within modules.
- GRAD: Gradients associated with parameters.
- ACT: Tensors produced during the forward pass and recomputation in activation checkpointing.
- TMP: Temporary memory used during the backward pass, including gradients of activations.
- OPT: Tensors holding optimizer states.
- OTH: Tensors registered via `track_external` that do not fit the above categories.
"""
PARAM = "Parameter"
BUFFER = "Buffer"
GRAD = "Gradient"
ACT = "Activation"
TEMP = "Temp"
OPT = "Optstate"
OTH = "Other"
| _MemRefType |
python | jina-ai__jina | tests/k8s/slow-process-executor/debug_executor.py | {
"start": 76,
"end": 510
} | class ____(Executor):
def __init__(self, time_sleep=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.time_sleep = time_sleep
@requests
def process(self, docs: DocumentArray, *args, **kwargs):
time.sleep(self.time_sleep)
for doc in docs:
doc.tags['replica_uid'] = os.environ['POD_UID']
doc.tags['time'] = time.time()
return docs
| SlowProcessExecutor |
python | weaviate__weaviate-python-client | weaviate/collections/grpc/aggregate.py | {
"start": 631,
"end": 9670
} | class ____(_BaseGRPC):
def __init__(
self,
weaviate_version: _ServerVersion,
name: str,
tenant: Optional[str],
consistency_level: Optional[ConsistencyLevel],
validate_arguments: bool,
):
super().__init__(weaviate_version, consistency_level, validate_arguments)
self._name: str = name
self._tenant = tenant
def objects_count(self, connection: Connection) -> executor.Result[int]:
def resp(res: aggregate_pb2.AggregateReply) -> int:
return res.single_result.objects_count
return executor.execute(
response_callback=resp,
method=connection.grpc_aggregate,
request=self.__create_request(objects_count=True),
)
def hybrid(
self,
*,
query: Optional[str],
alpha: Optional[float],
vector: Optional[HybridVectorType],
properties: Optional[List[str]],
distance: Optional[NUMBER] = None,
target_vector: Optional[TargetVectorJoinType],
bm25_operator: Optional[BM25OperatorOptions],
aggregations: List[aggregate_pb2.AggregateRequest.Aggregation],
filters: Optional[base_pb2.Filters],
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy],
limit: Optional[int],
object_limit: Optional[int],
objects_count: bool,
) -> aggregate_pb2.AggregateRequest:
return self.__create_request(
aggregations=aggregations,
filters=filters,
group_by=group_by,
hybrid=self._parse_hybrid(
query,
alpha,
vector,
properties,
bm25_operator, # no keyword operator for hybrid search
None,
distance,
target_vector,
),
limit=limit,
object_limit=object_limit,
objects_count=objects_count,
)
def near_media(
self,
*,
media: str,
type_: Literal["audio", "depth", "image", "imu", "thermal", "video"],
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
target_vector: Optional[TargetVectorJoinType],
aggregations: List[aggregate_pb2.AggregateRequest.Aggregation],
filters: Optional[base_pb2.Filters],
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy],
limit: Optional[int],
object_limit: Optional[int],
objects_count: bool,
) -> aggregate_pb2.AggregateRequest:
if self._validate_arguments:
self.__check_vector_search_args(
certainty=certainty,
distance=distance,
object_limit=object_limit,
)
return self.__create_request(
aggregations=aggregations,
filters=filters,
group_by=group_by,
limit=limit,
**self._parse_media(
media,
type_,
certainty,
distance,
target_vector,
),
object_limit=object_limit,
objects_count=objects_count,
)
def near_object(
self,
*,
near_object: UUID,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
target_vector: Optional[TargetVectorJoinType],
aggregations: List[aggregate_pb2.AggregateRequest.Aggregation],
filters: Optional[base_pb2.Filters],
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy],
limit: Optional[int],
object_limit: Optional[int],
objects_count: bool,
) -> aggregate_pb2.AggregateRequest:
if self._validate_arguments:
self.__check_vector_search_args(
certainty=certainty,
distance=distance,
object_limit=object_limit,
)
return self.__create_request(
aggregations=aggregations,
filters=filters,
group_by=group_by,
limit=limit,
near_object=self._parse_near_object(near_object, certainty, distance, target_vector),
object_limit=object_limit,
objects_count=objects_count,
)
def near_text(
self,
*,
near_text: Union[List[str], str],
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
move_to: Optional[Move],
move_away: Optional[Move],
target_vector: Optional[TargetVectorJoinType],
aggregations: List[aggregate_pb2.AggregateRequest.Aggregation],
filters: Optional[base_pb2.Filters],
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy],
limit: Optional[int],
object_limit: Optional[int],
objects_count: bool,
) -> aggregate_pb2.AggregateRequest:
if self._validate_arguments:
self.__check_vector_search_args(
certainty=certainty,
distance=distance,
object_limit=object_limit,
)
return self.__create_request(
aggregations=aggregations,
filters=filters,
group_by=group_by,
limit=limit,
near_text=self._parse_near_text(
near_text,
certainty,
distance,
move_away=move_away,
move_to=move_to,
target_vector=target_vector,
),
object_limit=object_limit,
objects_count=objects_count,
)
def near_vector(
self,
*,
near_vector: NearVectorInputType,
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
target_vector: Optional[TargetVectorJoinType],
aggregations: List[aggregate_pb2.AggregateRequest.Aggregation],
filters: Optional[base_pb2.Filters],
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy],
limit: Optional[int],
object_limit: Optional[int],
objects_count: bool,
) -> aggregate_pb2.AggregateRequest:
if self._validate_arguments:
self.__check_vector_search_args(
certainty=certainty,
distance=distance,
object_limit=object_limit,
)
return self.__create_request(
aggregations=aggregations,
filters=filters,
group_by=group_by,
limit=limit,
near_vector=self._parse_near_vector(
near_vector=near_vector,
certainty=certainty,
distance=distance,
target_vector=target_vector,
),
object_limit=object_limit,
objects_count=objects_count,
)
def over_all(
self,
*,
aggregations: List[aggregate_pb2.AggregateRequest.Aggregation],
filters: Optional[base_pb2.Filters],
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy],
limit: Optional[int],
objects_count: bool = False,
) -> aggregate_pb2.AggregateRequest:
return self.__create_request(
aggregations=aggregations,
filters=filters,
group_by=group_by,
limit=limit,
objects_count=objects_count,
)
def __check_vector_search_args(
self,
*,
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
object_limit: Optional[int],
) -> None:
if all([certainty is None, distance is None, object_limit is None]):
raise WeaviateInvalidInputError(
"You must provide at least one of the following arguments: certainty, distance, object_limit when vector searching"
)
def __create_request(
self,
*,
aggregations: Optional[List[aggregate_pb2.AggregateRequest.Aggregation]] = None,
filters: Optional[base_pb2.Filters] = None,
group_by: Optional[aggregate_pb2.AggregateRequest.GroupBy] = None,
hybrid: Optional[base_search_pb2.Hybrid] = None,
limit: Optional[int] = None,
near_object: Optional[base_search_pb2.NearObject] = None,
near_text: Optional[base_search_pb2.NearTextSearch] = None,
near_vector: Optional[base_search_pb2.NearVector] = None,
object_limit: Optional[int] = None,
objects_count: bool = False,
) -> aggregate_pb2.AggregateRequest:
return aggregate_pb2.AggregateRequest(
collection=self._name,
aggregations=aggregations,
filters=filters,
group_by=group_by,
hybrid=hybrid,
limit=limit,
near_object=near_object,
near_text=near_text,
near_vector=near_vector,
object_limit=object_limit,
objects_count=objects_count,
tenant=self._tenant,
)
| _AggregateGRPC |
python | huggingface__transformers | tests/models/vitmatte/test_modeling_vitmatte.py | {
"start": 9259,
"end": 10564
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k")
model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k").to(torch_device)
filepath = hf_hub_download(
repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset"
)
image = Image.open(filepath).convert("RGB")
filepath = hf_hub_download(
repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset"
)
trimap = Image.open(filepath).convert("L")
# prepare image + trimap for the model
inputs = processor(images=image, trimaps=trimap, return_tensors="pt").to(torch_device)
with torch.no_grad():
alphas = model(**inputs).alphas
expected_shape = torch.Size((1, 1, 640, 960))
self.assertEqual(alphas.shape, expected_shape)
expected_slice = torch.tensor(
[[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]], device=torch_device
)
torch.testing.assert_close(alphas[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| VitMatteModelIntegrationTest |
python | plotly__plotly.py | plotly/graph_objs/funnelarea/hoverlabel/_font.py | {
"start": 233,
"end": 17158
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnelarea.hoverlabel"
_path_str = "funnelarea.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnelarea.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnelarea.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | PyCQA__pydocstyle | src/pydocstyle/parser.py | {
"start": 8261,
"end": 8459
} | class ____(Definition):
"""A Python source code class."""
_nest = staticmethod(lambda s: {'def': Method, 'class': NestedClass}[s])
is_public = Function.is_public
is_class = True
| Class |
python | huggingface__transformers | src/transformers/models/zamba2/modeling_zamba2.py | {
"start": 4578,
"end": 9908
} | class ____:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
is_compileable = False
def __init__(
self, config: Zamba2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None
):
self.dtype = dtype
self.layers_block_type = config.layers_block_type
self.has_previous_state = False
self.intermediate_size = int(config.mamba_expand * config.hidden_size)
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.n_mamba_heads = config.n_mamba_heads
self.transformer_layers = []
self._modules = {}
self._parameters = {}
self._buffers = {}
self.conv_states = {}
self.ssm_states = {}
for i in range(config.num_hidden_layers):
self.conv_states[i] = torch.zeros(
batch_size,
self.intermediate_size + 2 * config.mamba_ngroups * config.mamba_d_state,
self.conv_kernel_size,
device=device,
dtype=dtype,
)
self.ssm_states[i] = torch.zeros(
batch_size, self.n_mamba_heads, config.mamba_headdim, self.ssm_state_size, device=device, dtype=dtype
)
if self.layers_block_type[i] == "hybrid":
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def __len__(self):
return len(self.key_cache)
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
# Update the cache
if self.key_cache[layer_idx].shape[-1] == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
if self.get_seq_length() > 0:
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
# take any layer that contains cache and not empty tensor
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
def update_conv_state(
self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
def reset(self):
self.conv_states.zero_()
self.ssm_states.zero_()
| Zamba2HybridDynamicCache |
python | huggingface__transformers | src/transformers/models/metaclip_2/modeling_metaclip_2.py | {
"start": 44248,
"end": 44814
} | class ____(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
| MetaClip2VisionModelOutput |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 524,
"end": 593
} | class ____(PlainA):
field2 = models.CharField(max_length=30)
| PlainB |
python | PyCQA__isort | isort/_vendored/tomli/_parser.py | {
"start": 4045,
"end": 6263
} | class ____:
"""Flags that map to parsed keys/namespaces."""
# Marks an immutable namespace (inline array or inline table).
FROZEN = 0
# Marks a nest that has been explicitly created and can no longer
# be opened using the "[table]" syntax.
EXPLICIT_NEST = 1
def __init__(self) -> None:
self._flags: Dict[str, dict] = {}
def unset_all(self, key: Key) -> None:
cont = self._flags
for k in key[:-1]:
if k not in cont:
return
cont = cont[k]["nested"]
cont.pop(key[-1], None)
def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None:
cont = self._flags
for k in head_key:
if k not in cont:
cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
for k in rel_key:
if k in cont:
cont[k]["flags"].add(flag)
else:
cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
cont = self._flags
key_parent, key_stem = key[:-1], key[-1]
for k in key_parent:
if k not in cont:
cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
if key_stem not in cont:
cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
def is_(self, key: Key, flag: int) -> bool:
if not key:
return False # document root has no flags
cont = self._flags
for k in key[:-1]:
if k not in cont:
return False
inner_cont = cont[k]
if flag in inner_cont["recursive_flags"]:
return True
cont = inner_cont["nested"]
key_stem = key[-1]
if key_stem in cont:
cont = cont[key_stem]
return flag in cont["flags"] or flag in cont["recursive_flags"]
return False
| Flags |
python | django__django | django/contrib/admin/apps.py | {
"start": 633,
"end": 840
} | class ____(SimpleAdminConfig):
"""The default AppConfig for admin which does autodiscovery."""
default = True
def ready(self):
super().ready()
self.module.autodiscover()
| AdminConfig |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 32795,
"end": 35530
} | class ____:
@pytest.mark.parametrize("verified", [True, False])
def test_password_compromised_email_hibp(
self, pyramid_request, pyramid_config, monkeypatch, verified
):
stub_user = pretend.stub(
id="id",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=verified),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/password-compromised-hibp/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/password-compromised-hibp/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/password-compromised-hibp/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=stub_user.id)
)
),
)
pyramid_request.user = stub_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = email.send_password_compromised_email_hibp(pyramid_request, stub_user)
assert result == {}
assert pyramid_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{stub_user.username} <{stub_user.email}>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": stub_user.email,
"subject": "Email Subject",
"redact_ip": False,
},
},
)
]
| TestPasswordCompromisedHIBPEmail |
python | huggingface__transformers | src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py | {
"start": 2755,
"end": 13288
} | class ____(BaseImageProcessorFast):
model_input_names = ["pixel_values", "prompt_depth"]
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 384, "width": 384}
do_resize = True
do_rescale = True
do_normalize = True
keep_aspect_ratio = False
ensure_multiple_of = 1
do_pad = False
size_divisor = None
prompt_scale_to_meter = 0.001
valid_kwargs = PromptDepthAnythingImageProcessorKwargs
def __init__(self, **kwargs: Unpack[PromptDepthAnythingImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(
self,
images: ImageInput,
prompt_depth: Optional[ImageInput] = None,
**kwargs: Unpack[PromptDepthAnythingImageProcessorKwargs],
) -> BatchFeature:
r"""
prompt_depth (`ImageInput`, *optional*):
Prompt depth to preprocess.
"""
return super().preprocess(images, prompt_depth, **kwargs)
def resize_with_aspect_ratio(
self,
image: "torch.Tensor",
size: SizeDict,
keep_aspect_ratio: bool = False,
ensure_multiple_of: int = 1,
interpolation: Optional["F.InterpolationMode"] = None,
) -> "torch.Tensor":
"""
Resize an image to target size while optionally maintaining aspect ratio and ensuring dimensions are multiples.
"""
# Set default interpolation to BICUBIC to match the slow processor (causes slight numerical differences otherwise)
if interpolation is None:
interpolation = F.InterpolationMode.BICUBIC
# Custom resize with aspect ratio preservation and ensure_multiple_of constraint
output_size = _get_resize_output_image_size(
image,
output_size=(size["height"], size["width"]),
keep_aspect_ratio=keep_aspect_ratio,
multiple=ensure_multiple_of,
)
# Standard resize method with calculated output size
return self.resize(
image=image,
size=SizeDict(height=output_size[0], width=output_size[1]),
interpolation=interpolation,
)
def pad_image(
self,
image: "torch.Tensor",
size_divisor: int,
) -> "torch.Tensor":
"""
Center pad an image to be a multiple of size_divisor.
"""
def _get_pad(size, size_divisor):
new_size = math.ceil(size / size_divisor) * size_divisor
pad_size = new_size - size
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return pad_size_left, pad_size_right
height, width = image.shape[-2:]
# Match slow processor and PyTorch convention: width->left/right, height->top/bottom
pad_size_left, pad_size_right = _get_pad(width, size_divisor)
pad_size_top, pad_size_bottom = _get_pad(height, size_divisor)
# Use torchvision padding for fast processing
# /!\ NB: torchvision F.pad expects (left, top, right, bottom) for the last two dims (W then H)
# Source: https://docs.pytorch.org/vision/main/generated/torchvision.transforms.Pad.html
# So: (left=width_pad, top=height_pad, right=width_pad, bottom=height_pad)
padding = [pad_size_left, pad_size_top, pad_size_right, pad_size_bottom]
padded_image = F.pad(image, padding=padding)
return padded_image
def _preprocess_image_like_inputs(
self,
images: ImageInput,
prompt_depth: Optional[ImageInput],
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
prompt_scale_to_meter: Optional[float] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs: Unpack[PromptDepthAnythingImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs, including the main images and optional prompt depth.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=False, input_data_format=input_data_format, device=device
) # always use do_convert_rgb=False rather than defining it as a param to match slow processor
# Process images with the standard pipeline
pixel_values = self._preprocess(images, return_tensors=return_tensors, **kwargs)
data = {"pixel_values": pixel_values}
# Process prompt depth if provided
if prompt_depth is not None:
processed_prompt_depths = self._prepare_image_like_inputs(
images=prompt_depth,
do_convert_rgb=False, # Depth maps should not be converted
input_data_format=input_data_format,
device=images[0].device if images else device,
expected_ndims=2,
)
# Validate prompt_depths has same length as images as in slow processor
if len(processed_prompt_depths) != len(images):
raise ValueError(
f"Number of prompt depth images ({len(processed_prompt_depths)}) does not match number of input images ({len(images)})"
)
final_prompt_depths = []
for depth in processed_prompt_depths:
depth = depth * prompt_scale_to_meter
# Handle case where depth is constant (min == max)
if depth.min() == depth.max():
depth[0, 0] = depth[0, 0] + 1e-6 # Add small variation to avoid numerical issues
if depth.ndim == 2: # Add channel dimension if needed
depth = depth.unsqueeze(0) # [H, W] -> [1, H, W] (channels first)
depth = depth.float() # Convert to float32 to match slow processor
final_prompt_depths.append(depth)
if return_tensors:
# Stack while preserving the [H, W, C] format that the slow processor uses
final_prompt_depths = torch.stack(final_prompt_depths, dim=0)
data["prompt_depth"] = final_prompt_depths
return BatchFeature(data=data, tensor_type=return_tensors)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
keep_aspect_ratio: Optional[bool],
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_pad: Optional[bool],
disable_grouping: Optional[bool],
ensure_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
size_divisor: Optional[int] = None,
**kwargs,
) -> "torch.Tensor":
"""
Override the base _preprocess method to handle custom PromptDepthAnything parameters.
"""
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize_with_aspect_ratio(
image=stacked_images,
size=size,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
interpolation=interpolation,
)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
if do_pad and size_divisor is not None:
stacked_images = self.pad_image(stacked_images, size_divisor)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
# Only stack tensors if they all have the same shape and return_tensors is specified
if return_tensors == "pt":
processed_images = torch.stack(processed_images, dim=0)
return processed_images
def post_process_depth_estimation(
self,
outputs: "DepthEstimatorOutput",
target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, "torch")
predicted_depth = outputs.predicted_depth
if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
)
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
for depth, target_size in zip(predicted_depth, target_sizes):
if target_size is not None:
depth = torch.nn.functional.interpolate(
depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
).squeeze()
results.append({"predicted_depth": depth})
return results
__all__ = ["PromptDepthAnythingImageProcessorFast"]
| PromptDepthAnythingImageProcessorFast |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/unsat_virtual_dependency/package.py | {
"start": 216,
"end": 509
} | class ____(Package):
"""This package has a dependency on a virtual that cannot be provided"""
homepage = "http://www.example.com"
url = "http://www.example.com/v1.0.tgz"
version("1.0", sha256="0123456789abcdef0123456789abcdef")
depends_on("unsatvdep")
| UnsatVirtualDependency |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datastore.py | {
"start": 6001,
"end": 6415
} | class ____:
@mock.patch(HOOK_PATH)
def test_execute(self, mock_hook):
op = CloudDatastoreGetOperationOperator(task_id="test_task", gcp_conn_id=CONN_ID, name=TRANSACTION)
op.execute({})
mock_hook.assert_called_once_with(gcp_conn_id=CONN_ID, impersonation_chain=None)
mock_hook.return_value.get_operation.assert_called_once_with(name=TRANSACTION)
| TestCloudDatastoreGetOperation |
python | pypa__pip | src/pip/_internal/cli/cmdoptions.py | {
"start": 4672,
"end": 31356
} | class ____(Option):
TYPES = Option.TYPES + ("path", "package_name")
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
TYPE_CHECKER["package_name"] = _package_name_option_check
TYPE_CHECKER["path"] = _path_option_check
###########
# options #
###########
help_: Callable[..., Option] = partial(
Option,
"-h",
"--help",
dest="help",
action="help",
help="Show help.",
)
debug_mode: Callable[..., Option] = partial(
Option,
"--debug",
dest="debug_mode",
action="store_true",
default=False,
help=(
"Let unhandled exceptions propagate outside the main subroutine, "
"instead of logging them to stderr."
),
)
isolated_mode: Callable[..., Option] = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv: Callable[..., Option] = partial(
Option,
"--require-virtualenv",
"--require-venv",
dest="require_venv",
action="store_true",
default=False,
help=(
"Allow pip to only run in a virtual environment; exit with an error otherwise."
),
)
override_externally_managed: Callable[..., Option] = partial(
Option,
"--break-system-packages",
dest="override_externally_managed",
action="store_true",
help="Allow pip to modify an EXTERNALLY-MANAGED Python installation",
)
python: Callable[..., Option] = partial(
Option,
"--python",
dest="python",
help="Run pip with the specified Python interpreter.",
)
verbose: Callable[..., Option] = partial(
Option,
"-v",
"--verbose",
dest="verbose",
action="count",
default=0,
help="Give more output. Option is additive, and can be used up to 3 times.",
)
no_color: Callable[..., Option] = partial(
Option,
"--no-color",
dest="no_color",
action="store_true",
default=False,
help="Suppress colored output.",
)
version: Callable[..., Option] = partial(
Option,
"-V",
"--version",
dest="version",
action="store_true",
help="Show version and exit.",
)
quiet: Callable[..., Option] = partial(
Option,
"-q",
"--quiet",
dest="quiet",
action="count",
default=0,
help=(
"Give less output. Option is additive, and can be used up to 3"
" times (corresponding to WARNING, ERROR, and CRITICAL logging"
" levels)."
),
)
progress_bar: Callable[..., Option] = partial(
Option,
"--progress-bar",
dest="progress_bar",
type="choice",
choices=["auto", "on", "off", "raw"],
default="auto",
help=(
"Specify whether the progress bar should be used. In 'auto'"
" mode, --quiet will suppress all progress bars."
" [auto, on, off, raw] (default: auto)"
),
)
log: Callable[..., Option] = partial(
PipOption,
"--log",
"--log-file",
"--local-log",
dest="log",
metavar="path",
type="path",
help="Path to a verbose appending log.",
)
no_input: Callable[..., Option] = partial(
Option,
# Don't ask for input
"--no-input",
dest="no_input",
action="store_true",
default=False,
help="Disable prompting for input.",
)
keyring_provider: Callable[..., Option] = partial(
Option,
"--keyring-provider",
dest="keyring_provider",
choices=["auto", "disabled", "import", "subprocess"],
default="auto",
help=(
"Enable the credential lookup via the keyring library if user input is allowed."
" Specify which mechanism to use [auto, disabled, import, subprocess]."
" (default: %default)"
),
)
proxy: Callable[..., Option] = partial(
Option,
"--proxy",
dest="proxy",
type="str",
default="",
help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.",
)
retries: Callable[..., Option] = partial(
Option,
"--retries",
dest="retries",
type="int",
default=5,
help="Maximum attempts to establish a new HTTP connection. (default: %default)",
)
resume_retries: Callable[..., Option] = partial(
Option,
"--resume-retries",
dest="resume_retries",
type="int",
default=5,
help="Maximum attempts to resume or restart an incomplete download. "
"(default: %default)",
)
timeout: Callable[..., Option] = partial(
Option,
"--timeout",
"--default-timeout",
metavar="sec",
dest="timeout",
type="float",
default=15,
help="Set the socket timeout (default %default seconds).",
)
def exists_action() -> Option:
return Option(
# Option when path already exist
"--exists-action",
dest="exists_action",
type="choice",
choices=["s", "i", "w", "b", "a"],
default=[],
action="append",
metavar="action",
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
)
cert: Callable[..., Option] = partial(
PipOption,
"--cert",
dest="cert",
type="path",
metavar="path",
help=(
"Path to PEM-encoded CA certificate bundle. "
"If provided, overrides the default. "
"See 'SSL Certificate Verification' in pip documentation "
"for more information."
),
)
client_cert: Callable[..., Option] = partial(
PipOption,
"--client-cert",
dest="client_cert",
type="path",
default=None,
metavar="path",
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.",
)
index_url: Callable[..., Option] = partial(
Option,
"-i",
"--index-url",
"--pypi-url",
dest="index_url",
metavar="URL",
default=PyPI.simple_url,
help="Base URL of the Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.",
)
def extra_index_url() -> Option:
return Option(
"--extra-index-url",
dest="extra_index_urls",
metavar="URL",
action="append",
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url.",
)
no_index: Callable[..., Option] = partial(
Option,
"--no-index",
dest="no_index",
action="store_true",
default=False,
help="Ignore package index (only looking at --find-links URLs instead).",
)
def find_links() -> Option:
return Option(
"-f",
"--find-links",
dest="find_links",
action="append",
default=[],
metavar="url",
help="If a URL or path to an html file, then parse for links to "
"archives such as sdist (.tar.gz) or wheel (.whl) files. "
"If a local path or file:// URL that's a directory, "
"then look for archives in the directory listing. "
"Links to VCS project URLs are not supported.",
)
def trusted_host() -> Option:
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host or host:port pair as trusted, even though it "
"does not have valid or any HTTPS.",
)
def constraints() -> Option:
return Option(
"-c",
"--constraint",
dest="constraints",
action="append",
default=[],
metavar="file",
help="Constrain versions using the given constraints file. "
"This option can be used multiple times.",
)
def build_constraints() -> Option:
return Option(
"--build-constraint",
dest="build_constraints",
action="append",
type="str",
default=[],
metavar="file",
help=(
"Constrain build dependencies using the given constraints file. "
"This option can be used multiple times."
),
)
def requirements() -> Option:
return Option(
"-r",
"--requirement",
dest="requirements",
action="append",
default=[],
metavar="file",
help="Install from the given requirements file. "
"This option can be used multiple times.",
)
def requirements_from_scripts() -> Option:
return Option(
"--requirements-from-script",
action="append",
default=[],
dest="requirements_from_scripts",
metavar="file",
help="Install dependencies of the given script file"
"as defined by PEP 723 inline metadata. ",
)
def editable() -> Option:
return Option(
"-e",
"--editable",
dest="editables",
action="append",
default=[],
metavar="path/url",
help=(
"Install a project in editable mode (i.e. setuptools "
'"develop mode") from a local project path or a VCS url.'
),
)
def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
value = os.path.abspath(value)
setattr(parser.values, option.dest, value)
src: Callable[..., Option] = partial(
PipOption,
"--src",
"--source",
"--source-dir",
"--source-directory",
dest="src_dir",
type="path",
metavar="dir",
default=get_src_prefix(),
action="callback",
callback=_handle_src,
help="Directory to check out editable projects into. "
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".',
)
def _get_format_control(values: Values, option: Option) -> Any:
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value,
existing.no_binary,
existing.only_binary,
)
def _handle_only_binary(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value,
existing.only_binary,
existing.no_binary,
)
def no_binary() -> Option:
format_control = FormatControl(set(), set())
return Option(
"--no-binary",
dest="format_control",
action="callback",
callback=_handle_no_binary,
type="str",
default=format_control,
help="Do not use binary packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all binary packages, ":none:" to empty the set (notice '
"the colons), or one or more package names with commas between "
"them (no colons). Note that some packages are tricky to compile "
"and may fail to install when this option is used on them.",
)
def only_binary() -> Option:
format_control = FormatControl(set(), set())
return Option(
"--only-binary",
dest="format_control",
action="callback",
callback=_handle_only_binary,
type="str",
default=format_control,
help="Do not use source packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all source packages, ":none:" to empty the set, or one '
"or more package names with commas between them. Packages "
"without binary distributions will fail to install when this "
"option is used on them.",
)
platforms: Callable[..., Option] = partial(
Option,
"--platform",
dest="platforms",
metavar="platform",
action="append",
default=None,
help=(
"Only use wheels compatible with <platform>. Defaults to the "
"platform of the running system. Use this option multiple times to "
"specify multiple platforms supported by the target interpreter."
),
)
# This was made a separate function for unit-testing purposes.
def _convert_python_version(value: str) -> tuple[tuple[int, ...], str | None]:
"""
Convert a version string like "3", "37", or "3.7.3" into a tuple of ints.
:return: A 2-tuple (version_info, error_msg), where `error_msg` is
non-None if and only if there was a parsing error.
"""
if not value:
# The empty string is the same as not providing a value.
return (None, None)
parts = value.split(".")
if len(parts) > 3:
return ((), "at most three version parts are allowed")
if len(parts) == 1:
# Then we are in the case of "3" or "37".
value = parts[0]
if len(value) > 1:
parts = [value[0], value[1:]]
try:
version_info = tuple(int(part) for part in parts)
except ValueError:
return ((), "each version part must be an integer")
return (version_info, None)
def _handle_python_version(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
"""
Handle a provided --python-version value.
"""
version_info, error_msg = _convert_python_version(value)
if error_msg is not None:
msg = f"invalid --python-version value: {value!r}: {error_msg}"
raise_option_error(parser, option=option, msg=msg)
parser.values.python_version = version_info
python_version: Callable[..., Option] = partial(
Option,
"--python-version",
dest="python_version",
metavar="python_version",
action="callback",
callback=_handle_python_version,
type="str",
default=None,
help=dedent(
"""\
The Python interpreter version to use for wheel and "Requires-Python"
compatibility checks. Defaults to a version derived from the running
interpreter. The version can be specified using up to three dot-separated
integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
version can also be given as a string without dots (e.g. "37" for 3.7.0).
"""
),
)
implementation: Callable[..., Option] = partial(
Option,
"--implementation",
dest="implementation",
metavar="implementation",
default=None,
help=(
"Only use wheels compatible with Python "
"implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
" or 'ip'. If not specified, then the current "
"interpreter implementation is used. Use 'py' to force "
"implementation-agnostic wheels."
),
)
abis: Callable[..., Option] = partial(
Option,
"--abi",
dest="abis",
metavar="abi",
action="append",
default=None,
help=(
"Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
"If not specified, then the current interpreter abi tag is used. "
"Use this option multiple times to specify multiple abis supported "
"by the target interpreter. Generally you will need to specify "
"--implementation, --platform, and --python-version when using this "
"option."
),
)
def add_target_python_options(cmd_opts: OptionGroup) -> None:
cmd_opts.add_option(platforms())
cmd_opts.add_option(python_version())
cmd_opts.add_option(implementation())
cmd_opts.add_option(abis())
def make_target_python(options: Values) -> TargetPython:
target_python = TargetPython(
platforms=options.platforms,
py_version_info=options.python_version,
abis=options.abis,
implementation=options.implementation,
)
return target_python
def prefer_binary() -> Option:
return Option(
"--prefer-binary",
dest="prefer_binary",
action="store_true",
default=False,
help=(
"Prefer binary packages over source packages, even if the "
"source packages are newer."
),
)
cache_dir: Callable[..., Option] = partial(
PipOption,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
type="path",
help="Store the cache data in <dir>.",
)
def _handle_no_cache_dir(
option: Option, opt: str, value: str, parser: OptionParser
) -> None:
"""
Process a value provided for the --no-cache-dir option.
This is an optparse.Option callback for the --no-cache-dir option.
"""
# The value argument will be None if --no-cache-dir is passed via the
# command-line, since the option doesn't accept arguments. However,
# the value can be non-None if the option is triggered e.g. by an
# environment variable, like PIP_NO_CACHE_DIR=true.
if value is not None:
# Then parse the string value to get argument error-checking.
try:
strtobool(value)
except ValueError as exc:
raise_option_error(parser, option=option, msg=str(exc))
# Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
# converted to 0 (like "false" or "no") caused cache_dir to be disabled
# rather than enabled (logic would say the latter). Thus, we disable
# the cache directory not just on values that parse to True, but (for
# backwards compatibility reasons) also on values that parse to False.
# In other words, always set it to False if the option is provided in
# some (valid) form.
parser.values.cache_dir = False
no_cache: Callable[..., Option] = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="callback",
callback=_handle_no_cache_dir,
help="Disable the cache.",
)
no_deps: Callable[..., Option] = partial(
Option,
"--no-deps",
"--no-dependencies",
dest="ignore_dependencies",
action="store_true",
default=False,
help="Don't install package dependencies.",
)
def _handle_dependency_group(
option: Option, opt: str, value: str, parser: OptionParser
) -> None:
"""
Process a value provided for the --group option.
Splits on the rightmost ":", and validates that the path (if present) ends
in `pyproject.toml`. Defaults the path to `pyproject.toml` when one is not given.
`:` cannot appear in dependency group names, so this is a safe and simple parse.
This is an optparse.Option callback for the dependency_groups option.
"""
path, sep, groupname = value.rpartition(":")
if not sep:
path = "pyproject.toml"
else:
# check for 'pyproject.toml' filenames using pathlib
if pathlib.PurePath(path).name != "pyproject.toml":
msg = "group paths use 'pyproject.toml' filenames"
raise_option_error(parser, option=option, msg=msg)
parser.values.dependency_groups.append((path, groupname))
dependency_groups: Callable[..., Option] = partial(
Option,
"--group",
dest="dependency_groups",
default=[],
type=str,
action="callback",
callback=_handle_dependency_group,
metavar="[path:]group",
help='Install a named dependency-group from a "pyproject.toml" file. '
'If a path is given, the name of the file must be "pyproject.toml". '
'Defaults to using "pyproject.toml" in the current directory.',
)
ignore_requires_python: Callable[..., Option] = partial(
Option,
"--ignore-requires-python",
dest="ignore_requires_python",
action="store_true",
help="Ignore the Requires-Python information.",
)
no_build_isolation: Callable[..., Option] = partial(
Option,
"--no-build-isolation",
dest="build_isolation",
action="store_false",
default=True,
help="Disable isolation when building a modern source distribution. "
"Build dependencies specified by PEP 518 must be already installed "
"if this option is used.",
)
check_build_deps: Callable[..., Option] = partial(
Option,
"--check-build-dependencies",
dest="check_build_deps",
action="store_true",
default=False,
help="Check the build dependencies.",
)
use_pep517: Any = partial(
Option,
"--use-pep517",
dest="use_pep517",
action="store_true",
default=True,
help=SUPPRESS_HELP,
)
def _handle_config_settings(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
key, sep, val = value.partition("=")
if sep != "=":
parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL")
dest = getattr(parser.values, option.dest)
if dest is None:
dest = {}
setattr(parser.values, option.dest, dest)
if key in dest:
if isinstance(dest[key], list):
dest[key].append(val)
else:
dest[key] = [dest[key], val]
else:
dest[key] = val
config_settings: Callable[..., Option] = partial(
Option,
"-C",
"--config-settings",
dest="config_settings",
type=str,
action="callback",
callback=_handle_config_settings,
metavar="settings",
help="Configuration settings to be passed to the build backend. "
"Settings take the form KEY=VALUE. Use multiple --config-settings options "
"to pass multiple keys to the backend.",
)
no_clean: Callable[..., Option] = partial(
Option,
"--no-clean",
action="store_true",
default=False,
help="Don't clean up build directories.",
)
pre: Callable[..., Option] = partial(
Option,
"--pre",
action="store_true",
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.",
)
json: Callable[..., Option] = partial(
Option,
"--json",
action="store_true",
default=False,
help="Output data in a machine-readable JSON format.",
)
disable_pip_version_check: Callable[..., Option] = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.",
)
root_user_action: Callable[..., Option] = partial(
Option,
"--root-user-action",
dest="root_user_action",
default="warn",
choices=["warn", "ignore"],
help="Action if pip is run as a root user [warn, ignore] (default: warn)",
)
def _handle_merge_hash(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(":", 1)
except ValueError:
parser.error(
f"Arguments to {opt_str} must be a hash name "
"followed by a value, like --hash=sha256:"
"abcde..."
)
if algo not in STRONG_HASHES:
parser.error(
"Allowed hash algorithms for {} are {}.".format(
opt_str, ", ".join(STRONG_HASHES)
)
)
parser.values.hashes.setdefault(algo, []).append(digest)
hash: Callable[..., Option] = partial(
Option,
"--hash",
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest="hashes",
action="callback",
callback=_handle_merge_hash,
type="string",
help="Verify that the package's archive matches this "
"hash before installing. Example: --hash=sha256:abcdef...",
)
require_hashes: Callable[..., Option] = partial(
Option,
"--require-hashes",
dest="require_hashes",
action="store_true",
default=False,
help="Require a hash to check each requirement against, for "
"repeatable installs. This option is implied when any package in a "
"requirements file has a --hash option.",
)
list_path: Callable[..., Option] = partial(
PipOption,
"--path",
dest="path",
type="path",
action="append",
help="Restrict to the specified installation path for listing "
"packages (can be used multiple times).",
)
def check_list_path_option(options: Values) -> None:
if options.path and (options.user or options.local):
raise CommandError("Cannot combine '--path' with '--user' or '--local'")
list_exclude: Callable[..., Option] = partial(
PipOption,
"--exclude",
dest="excludes",
action="append",
metavar="package",
type="package_name",
help="Exclude specified package from the output",
)
no_python_version_warning: Callable[..., Option] = partial(
Option,
"--no-python-version-warning",
dest="no_python_version_warning",
action="store_true",
default=False,
help=SUPPRESS_HELP, # No-op, a hold-over from the Python 2->3 transition.
)
# Features that are now always on. A warning is printed if they are used.
ALWAYS_ENABLED_FEATURES = [
"truststore", # always on since 24.2
"no-binary-enable-wheel-cache", # always on since 23.1
]
use_new_feature: Callable[..., Option] = partial(
Option,
"--use-feature",
dest="features_enabled",
metavar="feature",
action="append",
default=[],
choices=[
"fast-deps",
"build-constraint",
]
+ ALWAYS_ENABLED_FEATURES,
help="Enable new functionality, that may be backward incompatible.",
)
use_deprecated_feature: Callable[..., Option] = partial(
Option,
"--use-deprecated",
dest="deprecated_features_enabled",
metavar="feature",
action="append",
default=[],
choices=[
"legacy-resolver",
"legacy-certs",
],
help=("Enable deprecated functionality, that will be removed in the future."),
)
##########
# groups #
##########
general_group: dict[str, Any] = {
"name": "General Options",
"options": [
help_,
debug_mode,
isolated_mode,
require_virtualenv,
python,
verbose,
version,
quiet,
log,
no_input,
keyring_provider,
proxy,
retries,
timeout,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
no_color,
no_python_version_warning,
use_new_feature,
use_deprecated_feature,
resume_retries,
],
}
index_group: dict[str, Any] = {
"name": "Package Index Options",
"options": [
index_url,
extra_index_url,
no_index,
find_links,
],
}
| PipOption |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 5839,
"end": 5993
} | class ____(Message):
"""
Indicates a break statement outside of a while or for loop.
"""
message = '\'break\' outside loop'
| BreakOutsideLoop |
python | PyCQA__pylint | tests/functional/m/membership_protocol_py3.py | {
"start": 172,
"end": 253
} | class ____(type):
def __iter__(cls):
return iter((1, 2, 3))
| MetaIterable |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_micro_gemm.py | {
"start": 34772,
"end": 47188
} | class ____(CppMicroGemm):
"""
This class generates the code for micro gemm using Advanced Matrix extension (AMX)
instructions available in 4th generation Intel Xeon for compute.
It supports input types of torch.bfloat16 with fp32 output.
"""
TEMPLATE_ENTRY = r"""
{{declare_kernel}} {
{{kernel.assert_function}}(N % {{block_n}} == 0, "N dimension must be multiple of {{block_n}}");
{{kernel.assert_function}}(K % 2 == 0, "K dimension must be multiple of 2");
{%- if pack_vnni_B_locally %}
{{template.codegen_allocate_weight_buffer("packed_B_buf", input2_t, "K", block_n)}}
{%- endif %}
{%- if use_cached_dequantized_B %}
// Create a stack-allocated buffer for tiles of B.
// Except maybe for the tail-case, an AMX tile of B has 16x32 BF16 elements.
// we cache K * {{block_n}} elements of dequantized B
{{template.codegen_allocate_weight_buffer("dequantized_B_buf", input_t, "K", block_n)}}
const auto buf_size = K * {{block_n}};
auto load_dequantized_B = [&](int base_idx) {
// Load a tile of B & cache it in L1D.
{{input2_t}}* base_addr = const_cast<{{input2_t}}*>(B) + base_idx;
for (int idx_dq = 0, idx_q = 0; idx_dq < buf_size; idx_q += ldb, idx_dq += {{block_n}}) {
{%- for vec_idx in range(0, block_n, 32) %}
_mm_prefetch(base_addr + idx_q + 64 * ldb, _MM_HINT_T0);
{%- if (block_n - vec_idx) >= 32 %}
// 1) Load 32 x int8
__m256i v8 = _mm256_loadu_si256((const __m256i*)(base_addr + idx_q + {{vec_idx}}));
// 2) Extract two halves
__m128i v8_lo = _mm256_extracti128_si256(v8, 0);
__m128i v8_hi = _mm256_extracti128_si256(v8, 1);
// 3) Widen each half to i32
__m512i v32_lo = _mm512_cvtepi8_epi32(v8_lo);
__m512i v32_hi = _mm512_cvtepi8_epi32(v8_hi);
// 4) Convert to f32
__m512 f_lo = _mm512_cvtepi32_ps(v32_lo);
__m512 f_hi = _mm512_cvtepi32_ps(v32_hi);
// 5) f32 -> bf16 (round-to-nearest-even) and pack 32 lanes to 512b
// Packs the second operand (f_lo) into the lower 16 bf16 lanes and the first (f_hi) into the upper 16.
__m512i bf = (__m512i)_mm512_cvtne2ps_pbh(f_hi, f_lo);
// 6) Store 32 x bf16 (512 bits)
_mm512_storeu_si512((__m512i*)(dequantized_B_buf + idx_dq + {{vec_idx}}), bf);
{%- elif (block_n - vec_idx) >= 16 %}
// 1) Load 16 x int8 (128 bits)
__m128i v8 = _mm_loadu_si128((const __m128i*)(base_addr + idx_q + {{vec_idx}}));
// 2) Widen: 16 x i8 -> 16 x i32
__m512i v32 = _mm512_cvtepi8_epi32(v8);
// 3) Convert to f32
__m512 f32 = _mm512_cvtepi32_ps(v32);
// 4) Convert f32 -> bf16 (round-to-nearest-even)
__m256i bf16 = (__m256i)_mm512_cvtneps_pbh(f32);
// 5) Store 16 x bf16 (256 bits)
_mm256_storeu_si256((__m256i*)(dequantized_B_buf + idx_dq + {{vec_idx}}), bf16);
{%- else %}
auto b_int8_tail = at::vec::Vectorized<int8_t>::loadu(
base_addr + idx_q + {{block_n - (block_n % 32)}},
static_cast<int64_t>({{block_n % 32}})
);
auto b_bf16_tail = at::vec::convert<{{input_t}}>(b_int8_tail);
b_bf16_tail.store(
dequantized_B_buf + idx_dq + {{block_n - (block_n % 32)}},
static_cast<int64_t>({{block_n % 32}})
);
{%- endif %}
{%- endfor %}
}
};
{%- endif %}
// The ldb would not be block_n if N != block_n
{%- if use_cached_dequantized_B or pack_vnni_B_locally %}
const int64_t updated_ldb = {{block_n}};
{%- else %}
const int64_t updated_ldb = ldb;
{%- endif %}
// TODO(jgong5): loop unroll for M and N
for (int64_t n = 0; n < N; n += {{block_n}}) {
{%- if pack_vnni_B_locally %}
// Pack non-constant weights into VNNI interleaved format in packed_B_buf
at::vec::pack_vnni2(B + n, packed_B_buf, ldb, K, {{block_n}});
{%- elif use_cached_dequantized_B %}
// Dequantize K * block_n int8 B elements into BF16
load_dequantized_B(n);
{%- endif %}
for (int64_t m = 0; m < M; m += {{block_m}}) {
int64_t block_m = std::min<int64_t>(M - m, {{block_m}});
int64_t m_tail = m;
{%- for num_rows in range(block_m, 0, -16) %}
{%- if num_rows != block_m %}
else
{%- endif %}
if (block_m >= {{num_rows}}) {
{{kernel_name}}_amx_kernel_{{num_rows}}_{{num_columns}}<accum>(
amx_state,
A + m * lda,
{%- if use_cached_dequantized_B %}
dequantized_B_buf,
{%- elif pack_vnni_B_locally %}
packed_B_buf,
{%- else %}
B + n,
{%- endif %}
C + m * ldc + n,
K,
lda,
updated_ldb,
ldc,
16
);
block_m -= {{num_rows}};
m_tail += {{num_rows}};
}
{%- endfor %}
if (block_m > 0) {
{{kernel_name}}_amx_kernel_16_{{num_columns}}<accum>(
amx_state,
A + m_tail * lda,
{%- if use_cached_dequantized_B %}
dequantized_B_buf,
{%- elif pack_vnni_B_locally %}
packed_B_buf,
{%- else %}
B + n,
{%- endif %}
C + m_tail * ldc + n,
K,
lda,
updated_ldb,
ldc,
block_m
);
}
}
}
}
"""
TEMPLATE_KERNEL = r"""
template <bool accum, bool prefetch=false>
inline void {{kernel_name}}_amx_kernel_{{num_rows}}_{{num_columns}}(
AMXState& amx_state,
const {{input_t}}* {{restrict_keyword}} A,
{%- if use_cached_dequantized_B %}
const {{input_t}}* {{restrict_keyword}} B,
{%- else %}
const {{input2_t}}* {{restrict_keyword}} B,
{%- endif %}
{{output_t}}* {{restrict_keyword}} C,
int64_t K,
int64_t lda,
int64_t ldb,
int64_t ldc,
uint8_t tilecfg_rows
) {
// TODO(jgong5): add prefetch hint for A, B, C
auto loadconfig = [](const amx_tilecfg& cfg) {
_tile_loadconfig(&cfg);
};
const auto last_k_offset = K / {{block_k}} * {{block_k}};
const auto tail_k_size = K - last_k_offset;
if C10_LIKELY (last_k_offset > 0) {
amx_state.configure(tilecfg_rows, 64, {{num_rows}} / 16, {{num_columns}}, loadconfig);
} else {
amx_state.configure(tilecfg_rows, tail_k_size * sizeof({{input_t}}), {{num_rows}} / 16, {{num_columns}}, loadconfig);
}
auto load_c = [&]() {
{%- for tile_row in range(num_rows // 16) %}
{%- for tile_col in range(num_columns) %}
{%- set tile_idx = tile_row * num_columns + tile_col %}
_tile_loadd({{tile_idx}}, C + {{tile_row * 16}} * ldc + {{tile_col * 16}}, ldc * sizeof({{output_t}}));
{%- endfor %}
{%- endfor %}
};
auto zero_c = [&]() {
{%- for tile_row in range(num_rows // 16) %}
{%- for tile_col in range(num_columns) %}
{%- set tile_idx = tile_row * num_columns + tile_col %}
_tile_zero({{tile_idx}});
{%- endfor %}
{%- endfor %}
};
if constexpr (accum) {
load_c();
} else {
zero_c();
}
auto compute = [&](int k) {
{%- set tile_offset_a = num_rows // 16 * num_columns %}
{%- set tile_offset_b = tile_offset_a + num_rows // 16 %}
{%- for tile_row in range(num_rows // 16) %}
{%- for tile_col in range(num_columns) %}
{%- set tile_idx_a = tile_offset_a + tile_row %}
{%- set tile_idx_b = tile_offset_b + tile_col %}
{%- set tile_idx_c = tile_row * num_columns + tile_col %}
{%- if tile_col == 0 %}
_tile_stream_loadd({{tile_idx_a}}, A + {{tile_row * 16}} * lda + k, lda * sizeof({{input_t}}));
{%- endif %}
{%- if tile_row == 0 %}
_tile_loadd({{tile_idx_b}}, B + k * ldb + {{tile_col * 16 * vnni_size}}, ldb * {{vnni_size}} * sizeof({{input_t}}));
{%- endif %}
{%- if int8_gemm %}
{%- if input_dtype == torch.int8 %}
_tile_dpbssd({{tile_idx_c}}, {{tile_idx_a}}, {{tile_idx_b}});
{%- else %}
_tile_dpbusd({{tile_idx_c}}, {{tile_idx_a}}, {{tile_idx_b}});
{%- endif %}
{%- else %}
{%- if input_dtype == torch.float16 %}
_tile_dpfp16ps({{tile_idx_c}}, {{tile_idx_a}}, {{tile_idx_b}});
{%- else %}
_tile_dpbf16ps({{tile_idx_c}}, {{tile_idx_a}}, {{tile_idx_b}});
{%- endif %}
{%- endif %}
{%- endfor %}
{%- endfor %}
};
{{kernel.unroll_pragma(4)}}
for (int k = 0; k < last_k_offset; k += {{block_k}}) {
compute(k);
}
auto store_c = [&]() {
// store to C
{%- for tile_row in range(num_rows // 16) %}
{%- for tile_col in range(num_columns) %}
{%- set tile_idx = tile_row * num_columns + tile_col %}
_tile_stored({{tile_idx}}, C + {{tile_row * 16}} * ldc + {{tile_col * 16}}, ldc * sizeof({{output_t}}));
{%- endfor %}
{%- endfor %}
};
// TODO(jgong5): move tail k computation to separate loopnest to save tile configuration overhead
if C10_UNLIKELY (tail_k_size > 0) {
if C10_LIKELY (last_k_offset > 0) {
store_c();
amx_state.configure(tilecfg_rows, tail_k_size * sizeof({{input_t}}), {{num_rows}} / 16, {{num_columns}}, loadconfig);
load_c();
}
compute(last_k_offset);
}
store_c();
}
"""
def codegen_define(self, kernel: CppTemplateKernel) -> str:
block_m, block_n, block_k = self.register_blocking
assert block_m % 16 == 0, "Only support block_m % 16 == 0 for AMX"
assert block_n % 16 == 0, "Only support block_n % 16 == 0 for AMX"
if self.input_dtype in [torch.uint8, torch.int8]:
assert block_k == 64, "Only support block_k = 64 for AMX INT8"
else:
assert block_k == 32, "Only support block_k = 32 for AMX Bfloat16/Float16"
num_columns = block_n // 16
options = {
"declare_kernel": self.get_kernel_declaration(),
"use_cached_dequantized_B": (
self.input_dtype == torch.bfloat16
and self.input2_dtype in [torch.int8, torch.uint8]
),
"kernel": kernel,
"block_m": block_m,
"block_n": block_n,
"block_k": block_k,
"num_columns": num_columns,
"restrict_keyword": get_restrict_keyword(),
**self.get_common_options(),
}
result = ""
for num_rows in range(block_m, 0, -16):
amx_kernel_options = {**options, "num_rows": num_rows}
result += KernelTemplate._template_from_string(self.TEMPLATE_KERNEL).render(
amx_kernel_options
)
result += KernelTemplate._template_from_string(self.TEMPLATE_ENTRY).render(
options
)
return result
def codegen_init(
self,
kernel: CppTemplateKernel,
) -> str:
return "AMXState amx_state;"
def codegen_finalize(
self,
kernel: CppTemplateKernel,
) -> str:
return "amx_state.release([]() { _tile_release(); });"
def get_kernel_extra_args_declare(self) -> str:
return "AMXState& amx_state,"
def get_kernel_extra_args(self, **kwargs) -> list[str]:
return ["amx_state,"]
def get_b_layout(self):
if self.input_dtype in [torch.uint8, torch.int8]:
return LayoutType.VNNI4
else:
return LayoutType.VNNI2
# extra check for CppMicroBrgemm
def check_brgemm_extra(config, m, n, k, alpha, num_threads, **kwargs):
assert config.input_dtype == torch.half and config.output_dtype == torch.float
vnni_size = 2
# use brgemm for Half when amx_fp16 is supported
return torch.cpu._is_amx_fp16_supported() and k % vnni_size == 0 and alpha == 1
@register_micro_gemm(
*generate_gemm_config(
VecAMX,
[(32, 32, 32), (48, 16, 32), (16, 48, 32)],
input_dtype=torch.half,
output_dtype=torch.float,
extra_check=check_brgemm_extra,
),
)
| CppMicroGemmAMX |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/type_inference_test.py | {
"start": 1331,
"end": 1795
} | class ____(type_inference.Resolver):
"""A very basic resolver for testing."""
def res_name(self, ns, types_ns, name):
str_name = str(name)
if str_name == 'int':
return {int}, int
return {type(ns[str_name])}, ns[str_name]
def res_value(self, ns, value):
return {type(value)}
def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
if type_anno is None:
return None
return {str(type_anno)}
| BasicTestResolver |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.