language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/greentest/3.11/test_socket.py | {
"start": 177344,
"end": 179518
} | class ____(InterruptedTimeoutBase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
| InterruptedSendTimeoutTest |
python | Pylons__pyramid | src/pyramid/csrf.py | {
"start": 420,
"end": 1557
} | class ____:
"""A CSRF storage policy that defers control of CSRF storage to the
session.
This policy maintains compatibility with legacy ISession implementations
that know how to manage CSRF tokens themselves via
``ISession.new_csrf_token`` and ``ISession.get_csrf_token``.
Note that using this CSRF implementation requires that
a :term:`session factory` is configured.
.. versionadded:: 1.9
"""
def new_csrf_token(self, request):
"""Sets a new CSRF token into the session and returns it."""
return request.session.new_csrf_token()
def get_csrf_token(self, request):
"""Returns the currently active CSRF token from the session,
generating a new one if needed."""
return request.session.get_csrf_token()
def check_csrf_token(self, request, supplied_token):
"""Returns ``True`` if the ``supplied_token`` is valid."""
expected_token = self.get_csrf_token(request)
return not strings_differ(
bytes_(expected_token), bytes_(supplied_token)
)
@implementer(ICSRFStoragePolicy)
| LegacySessionCSRFStoragePolicy |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/task_proxy_operator.py | {
"start": 368,
"end": 2372
} | class ____(BaseDagsterAssetsOperator):
"""An operator that proxies task execution to Dagster assets with metadata that map to this task's dag ID and task ID.
For the DAG ID and task ID that this operator proxies, it expects there to be corresponding assets
in the linked Dagster deployment that have metadata entries with the key `dagster-airlift/task-mapping` that
map to this DAG ID and task ID. This metadata is typically set using the
:py:func:`dagster_airlift.core.assets_with_task_mappings` function.
The following methods must be implemented by subclasses:
- :py:meth:`get_dagster_session` (inherited from :py:class:`BaseDagsterAssetsOperator`)
- :py:meth:`get_dagster_url` (inherited from :py:class:`BaseDagsterAssetsOperator`)
- :py:meth:`build_from_task` A class method which takes the task to be proxied, and constructs
an instance of this operator from it.
There is a default implementation of this operator, :py:class:`DefaultProxyTaskToDagsterOperator`,
which is used by :py:func:`proxying_to_dagster` if no override operator is provided.
"""
@property
def should_defer_asset_events(self) -> bool:
return False
def filter_asset_nodes(
self, context: Context, asset_nodes: Sequence[Mapping[str, Any]]
) -> Iterable[Mapping[str, Any]]:
for asset_node in asset_nodes:
if matched_dag_id_task_id(
asset_node, self.get_airflow_dag_id(context), self.get_airflow_task_id(context)
):
yield asset_node
@classmethod
def build_from_task(cls, task: BaseOperator) -> "BaseProxyTaskToDagsterOperator":
return build_dagster_task(task, cls)
def default_dagster_run_tags(self, context: Context) -> dict[str, str]:
tags = super().default_dagster_run_tags(context)
if self.should_defer_asset_events:
tags[DEFER_ASSET_EVENTS_TAG] = "true"
return tags
| BaseProxyTaskToDagsterOperator |
python | scikit-learn__scikit-learn | sklearn/naive_bayes.py | {
"start": 5092,
"end": 19496
} | class ____(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB).
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see `Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque
<http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf>`_.
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier.
epsilon_ : float
absolute additive value to variances.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class.
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
_parameter_constraints: dict = {
"priors": ["array-like", None],
"var_smoothing": [Interval(Real, 0, None, closed="left")],
}
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns the instance itself.
"""
y = validate_data(self, y=y)
xp_y, _ = get_namespace(y)
return self._partial_fit(
X, y, xp_y.unique_values(y), _refit=True, sample_weight=sample_weight
)
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return validate_data(self, X, reset=False)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
xp, _ = get_namespace(X)
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(xp.sum(sample_weight))
if np.isclose(n_new, 0.0):
return mu, var
new_mu = _average(X, axis=0, weights=sample_weight, xp=xp)
new_var = _average((X - new_mu) ** 2, axis=0, weights=sample_weight, xp=xp)
else:
n_new = X.shape[0]
new_var = xp.var(X, axis=0)
new_mu = xp.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
total_var = total_ssd / n_total
return total_mu, total_var
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns the instance itself.
"""
return self._partial_fit(
X, y, classes, _refit=False, sample_weight=sample_weight
)
def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
if _refit:
self.classes_ = None
first_call = _check_partial_fit_first_call(self, classes)
X, y = validate_data(self, X, y, reset=first_call)
xp, _, device_ = get_namespace_and_device(X)
float_dtype = _find_matching_floating_dtype(X, xp=xp)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=float_dtype)
xp_y, _ = get_namespace(y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * xp.max(xp.var(X, axis=0))
if first_call:
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = self.classes_.shape[0]
self.theta_ = xp.zeros(
(n_classes, n_features), dtype=float_dtype, device=device_
)
self.var_ = xp.zeros(
(n_classes, n_features), dtype=float_dtype, device=device_
)
self.class_count_ = xp.zeros(n_classes, dtype=float_dtype, device=device_)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = xp.asarray(self.priors, dtype=float_dtype, device=device_)
# Check that the provided prior matches the number of classes
if priors.shape[0] != n_classes:
raise ValueError("Number of priors must match number of classes.")
# Check that the sum is 1
if not xpx.isclose(xp.sum(priors), 1.0):
raise ValueError("The sum of the priors should be 1.")
# Check that the priors are non-negative
if xp.any(priors < 0):
raise ValueError("Priors must be non-negative.")
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = xp.zeros(
self.classes_.shape[0], dtype=float_dtype, device=device_
)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = xp_y.unique_values(y)
unique_y_in_classes = _isin(unique_y, classes, xp=xp_y)
if not xp_y.all(unique_y_in_classes):
raise ValueError(
"The target label(s) %s in y do not exist in the initial classes %s"
% (unique_y[~unique_y_in_classes], classes)
)
for y_i in unique_y:
i = int(xp_y.searchsorted(classes, y_i))
y_i_mask = xp.asarray(y == y_i, device=device_)
X_i = X[y_i_mask]
if sample_weight is not None:
sw_i = sample_weight[y_i_mask]
N_i = xp.sum(sw_i)
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i
)
self.theta_[i, :] = new_theta
self.var_[i, :] = new_sigma
self.class_count_[i] += N_i
self.var_[:, :] += self.epsilon_
# Update if only no priors is provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / xp.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
xp, _ = get_namespace(X)
joint_log_likelihood = []
for i in range(size(self.classes_)):
jointi = xp.log(self.class_prior_[i])
n_ij = -0.5 * xp.sum(xp.log(2.0 * xp.pi * self.var_[i, :]))
n_ij = n_ij - 0.5 * xp.sum(
((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), axis=1
)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = xp.stack(joint_log_likelihood).T
return joint_log_likelihood
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.array_api_support = True
return tags
| GaussianNB |
python | kamyu104__LeetCode-Solutions | Python/tree-diameter.py | {
"start": 1092,
"end": 1774
} | class ____(object):
def treeDiameter(self, edges):
"""
:type edges: List[List[int]]
:rtype: int
"""
def dfs(u, p):
mx = 0
for v in adj[u]:
if v == p:
continue
curr = dfs(v, u)
result[0] = max(result[0], mx+(curr+1))
mx = max(mx, curr+1)
return mx
adj = [[] for _ in range(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
result = [0]
dfs(0, -1)
return result[0]
# Time: O(|V| + |E|)
# Space: O(|E|)
# bfs, tree dp
| Solution2 |
python | pytorch__pytorch | torch/_export/verifier.py | {
"start": 2669,
"end": 3760
} | class ____(type):
_registry: dict[str, type["Verifier"]] = {}
def __new__(metacls, name, bases, attrs):
if bases:
if "check" in attrs or "_check_graph_module" in attrs:
raise SyntaxError("Overriding method check is not allowed.")
assert "dialect" in attrs and attrs["dialect"] != "ATEN"
else:
assert "check" in attrs
assert "_check_graph_module" in attrs
assert attrs["dialect"] == "ATEN"
assert isinstance(attrs["dialect"], str)
ret = type.__new__(metacls, name, bases, attrs)
metacls._registry[attrs["dialect"]] = ret # type: ignore[assignment]
return ret
def getattr_recursive(obj: Any, target: str) -> Any:
target_atoms = target.split(".")
attr_itr = obj
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(
f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}"
)
attr_itr = getattr(attr_itr, atom)
return attr_itr
| _VerifierMeta |
python | kamyu104__LeetCode-Solutions | Python/add-two-numbers.py | {
"start": 128,
"end": 767
} | class ____(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
dummy = ListNode(0)
current, carry = dummy, 0
while l1 or l2:
val = carry
if l1:
val += l1.val
l1 = l1.next
if l2:
val += l2.val
l2 = l2.next
carry, val = divmod(val, 10)
current.next = ListNode(val)
current = current.next
if carry == 1:
current.next = ListNode(1)
return dummy.next
| Solution |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 386275,
"end": 390587
} | class ____:
# data from https://github.com/scipy/scipy/issues/19460
data = [0.87, 0.87, 1.29, 1.5, 1.7, 0.66, 1.5, 0.5, 1., 1.25, 2.3,
1.03, 2.85, 0.68, 1.74, 1.94, 0.63, 2.04, 1.2, 0.64, 2.05, 0.97,
2.81, 1.02, 2.76, 0.86, 1.36, 1.29, 1.68, 0.72, 1.67, 1.15, 3.26,
0.93, 0.83, 0.91, 0.92, 2.32, 1.12, 3.21, 1.23, 1.22, 1.29, 2.08,
0.64, 2.83, 2.68, 1.77, 0.69, 1.69, 0.7, 1.83, 2.25, 1.23, 1.17,
0.94, 1.22, 0.76, 0.69, 0.48, 1.04, 2.49, 1.38, 1.57, 1.79, 1.59,
1.3, 1.54, 1.07, 1.03, 0.76, 2.35, 2.05, 2.02, 2.36, 1.59, 0.97,
1.63, 1.66, 0.94, 1.45, 1.26, 1.25, 0.68, 2.96, 0.8, 1.16, 0.82,
0.64, 0.87, 1.33, 1.28, 1.26, 1.19, 1.24, 1.12, 1.45, 1.03, 1.37,
1.4, 1.35, 1.28, 1.04, 1.31, 0.87, 0.96, 2.55, 1.72, 1.05, 1.15,
1.73, 1.03, 1.53, 2.41, 1.36, 2.08, 0.92, 0.73, 1.56, 1.94, 0.78]
not_integers = [1.5, [1, 2, 3.5], math.nan, math.inf]
def test_dtype_iv(self, xp):
message = '`sample` must be an array of real numbers.'
with pytest.raises(ValueError, match=message):
stats.lmoment(xp.asarray(self.data, dtype=xp.complex128))
@skip_xp_invalid_arg
def test_dtype_iv_non_numeric(self):
message = '`sample` must be an array of real numbers.'
with pytest.raises(ValueError, match=message):
stats.lmoment(np.array(self.data, dtype=object))
@pytest.mark.parametrize('order', not_integers + [0, -1, [], [[1, 2, 3]]])
def test_order_iv(self, order, xp):
message = '`order` must be a scalar or a non-empty...'
with pytest.raises(ValueError, match=message):
stats.lmoment(xp.asarray(self.data), order=order)
@pytest.mark.parametrize('axis', not_integers)
def test_axis_iv(self, axis, xp):
message = '`axis` must be an integer'
with pytest.raises(ValueError, match=message):
stats.lmoment(xp.asarray(self.data), axis=axis)
@pytest.mark.parametrize('sorted', not_integers)
def test_sorted_iv(self, sorted, xp):
message = '`sorted` must be True or False.'
with pytest.raises(ValueError, match=message):
stats.lmoment(xp.asarray(self.data), sorted=sorted)
@pytest.mark.parametrize('standardize', not_integers)
def test_standardize_iv(self, standardize, xp):
message = '`standardize` must be True or False.'
with pytest.raises(ValueError, match=message):
stats.lmoment(xp.asarray(self.data), standardize=standardize)
@pytest.mark.parametrize('order', [1, 4, [1, 2, 3, 4]])
@pytest.mark.parametrize('standardize', [False, True])
@pytest.mark.parametrize('presorted', [False, True])
def test_lmoment(self, order, standardize, presorted, xp):
# Reference values from R package `lmom`
# options(digits=16)
# library(lmom)
# data= c(0.87, 0.87,..., 1.94, 0.78)
# samlmu(data)
ref = xp.asarray([1.4087603305785130, 0.3415936639118458,
0.2189964482831403, 0.1328186463415905])
if not standardize:
ref = xpx.at(ref)[2:].multiply(ref[1])
data = sorted(self.data) if presorted else self.data
data = xp.asarray(data)
res = stats.lmoment(data, order, standardize=standardize, sorted=presorted)
xp_assert_close(res, ref[xp.asarray(order)-1])
def test_dtype(self, xp):
dtype = xp.float32
sample = xp.asarray(self.data)
res = stats.lmoment(xp.astype(sample, dtype))
ref = xp.astype(stats.lmoment(sample), dtype)
xp_assert_close(res, ref, rtol=1e-4)
dtype = xp.int64
sample = xp.asarray([1, 2, 3, 4, 5])
res = stats.lmoment(xp.astype(sample, dtype))
ref = stats.lmoment(xp.astype(sample, xp_default_dtype(xp)))
xp_assert_close(res, ref)
@pytest.mark.parametrize("axis", [0, 1])
def test_axis(self, axis, xp):
# nd input is tested extensively in `test_axis_nan_policy`, but only for NumPy
rng = np.random.default_rng(234923498149931248151)
x = rng.random(size=(10, 11))
res = stats.lmoment(xp.asarray(x), axis=axis)
ref = xp.asarray(stats.lmoment(x, axis=axis))
xp_assert_close(res, ref)
| TestLMoment |
python | streamlit__streamlit | lib/tests/streamlit/elements/vega_charts_test.py | {
"start": 89430,
"end": 101658
} | class ____(DeltaGeneratorTestCase):
"""Test vega_lite_chart width parameter functionality."""
@parameterized.expand(
[
# width, expected_width_spec, expected_width_value
("stretch", "use_stretch", True),
("content", "use_content", True),
(500, "pixel_width", 500),
]
)
def test_vega_lite_chart_width_combinations(
self,
width: str | int,
expected_width_spec: str,
expected_width_value: bool | int,
):
"""Test vega_lite_chart with various width combinations."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
spec = {
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
}
st.vega_lite_chart(df, spec, width=width)
el = self.get_delta_from_queue().new_element
# Check width configuration
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, expected_width_spec) == expected_width_value
@parameterized.expand(
[
# Test parameters: use_container_width, width, expected_width_spec, expected_width_value
(
True,
None,
"use_stretch",
True,
), # use_container_width=True -> width="stretch"
(
False,
None,
"use_content",
True,
), # use_container_width=False -> width="content"
(
True,
500,
"use_stretch",
True,
), # use_container_width=True overrides integer width
(
True,
"content",
"use_stretch",
True,
), # use_container_width=True overrides string width
(
False,
"content",
"use_content",
True,
), # use_container_width=False, width="content"
(
False,
500,
"pixel_width",
500,
), # use_container_width=False, integer width -> respect integer
]
)
@patch("streamlit.elements.vega_charts.show_deprecation_warning")
def test_vega_lite_chart_use_container_width_deprecation(
self,
use_container_width: bool,
width: int | str | None,
expected_width_spec: str,
expected_width_value: bool | int,
mock_warning: Mock,
):
"""Test that use_container_width shows deprecation warning and is correctly translated to
the new width parameter."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
spec = {
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
}
kwargs = {"use_container_width": use_container_width}
if width is not None:
kwargs["width"] = width
st.vega_lite_chart(df, spec, **kwargs)
mock_warning.assert_called_once()
el = self.get_delta_from_queue().new_element
# Should be translated to the correct width configuration
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, expected_width_spec) == expected_width_value
@parameterized.expand(
[
("width", "invalid_width"),
("width", 0), # width must be positive
("width", -100), # negative width
]
)
def test_vega_lite_chart_validation_errors(
self, param_name: str, invalid_value: str | int
):
"""Test that invalid width values raise validation errors."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
spec = {
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
}
kwargs = {param_name: invalid_value}
with pytest.raises(StreamlitAPIException):
st.vega_lite_chart(df, spec, **kwargs)
def test_vega_lite_chart_width_with_selections(self):
"""Test that width works correctly with selections enabled."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
spec = {
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
"params": [{"name": "my_param", "select": {"type": "point"}}],
}
result = st.vega_lite_chart(
df, spec, width=600, on_select="rerun", key="test_chart"
)
el = self.get_delta_from_queue().new_element
# Check width configuration
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
assert el.width_config.pixel_width == 600
# Check that selection state is returned
assert result.selection.my_param == {}
@parameterized.expand(
[
# Test name, spec description, chart spec
(
"regular_chart",
"Regular charts",
{
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
},
),
(
"vconcat_chart",
"Vertical concatenation charts",
{
"vconcat": [
{
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
},
{
"mark": "point",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
},
]
},
),
]
)
def test_vega_lite_chart_default_width_stretch_charts(
self, test_name: str, chart_description: str, spec: dict
):
"""Test that certain chart types default to 'stretch' width."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
st.vega_lite_chart(df, spec)
el = self.get_delta_from_queue().new_element
assert el.width_config.WhichOneof("width_spec") == "use_stretch"
assert el.width_config.use_stretch is True
@parameterized.expand(
[
# Test name, spec description, chart spec
(
"facet_chart_in_spec",
"Facet charts (with 'facet' in spec)",
{
"facet": {"field": "a", "type": "ordinal"},
"spec": {
"mark": "bar",
"encoding": {"y": {"field": "b", "type": "quantitative"}},
},
},
),
(
"facet_chart_row_encoding",
"Charts with 'row' in encoding",
{
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
"row": {"field": "a", "type": "ordinal"},
},
},
),
(
"facet_chart_column_encoding",
"Charts with 'column' in encoding",
{
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
"column": {"field": "a", "type": "ordinal"},
},
},
),
(
"facet_chart_facet_encoding",
"Charts with 'facet' in encoding",
{
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
"facet": {"field": "a", "type": "ordinal"},
},
},
),
(
"hconcat_chart",
"Horizontal concatenation charts",
{
"hconcat": [
{
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
},
{
"mark": "point",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
},
]
},
),
(
"repeat_chart",
"Repeat charts",
{
"repeat": {"row": ["a", "b"]},
"spec": {
"mark": "bar",
"encoding": {
"x": {"field": {"repeat": "row"}, "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
},
},
),
]
)
def test_vega_lite_chart_default_width_content_charts(
self, test_name: str, chart_description: str, spec: dict
):
"""Test that certain chart types default to 'content' width."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
st.vega_lite_chart(df, spec)
el = self.get_delta_from_queue().new_element
assert el.width_config.WhichOneof("width_spec") == "use_content"
assert el.width_config.use_content is True
def test_vega_lite_chart_default_width_parameter(self):
"""Test that default width parameter is consistently typed across overloads and implementation.
This test verifies that when no width parameter is provided, the internal logic
determines the appropriate default (which varies by chart type), ensuring
consistency between overloads and implementation signatures.
"""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
spec = {
"mark": "bar",
"encoding": {
"x": {"field": "a", "type": "ordinal"},
"y": {"field": "b", "type": "quantitative"},
},
}
# Call without specifying width parameter - internal logic determines default
st.vega_lite_chart(df, spec)
el = self.get_delta_from_queue().new_element
# Verify some width configuration is set (the specific default depends on chart type)
assert el.width_config.WhichOneof("width_spec") is not None
# For regular charts, the default is "stretch"
assert el.width_config.WhichOneof("width_spec") == "use_stretch"
assert el.width_config.use_stretch is True
| VegaLiteChartWidthTest |
python | astropy__astropy | astropy/visualization/lupton_rgb.py | {
"start": 1826,
"end": 6636
} | class ____:
"""
Baseclass to map red, blue, green intensities into uint8 values.
Parameters
----------
minimum : float or sequence(3)
Intensity that should be mapped to black (a scalar or array for R, G, B).
image : ndarray, optional
An image used to calculate some parameters of some mappings.
"""
def __init__(self, minimum=None, image=None):
self._uint8Max = float(np.iinfo(np.uint8).max)
try:
len(minimum)
except TypeError:
minimum = 3 * [minimum]
if len(minimum) != 3:
raise ValueError("please provide 1 or 3 values for minimum.")
self.minimum = minimum
self._image = np.asarray(image)
def make_rgb_image(self, image_r, image_g, image_b):
"""
Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image.
Parameters
----------
image_r : ndarray
Image to map to red.
image_g : ndarray
Image to map to green.
image_b : ndarray
Image to map to blue.
Returns
-------
RGBimage : ndarray
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
"""
image_r = np.asarray(image_r)
image_g = np.asarray(image_g)
image_b = np.asarray(image_b)
if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape):
msg = "The image shapes must match. r: {}, g: {} b: {}"
raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape))
return np.dstack(
self._convert_images_to_uint8(image_r, image_g, image_b)
).astype(np.uint8)
def intensity(self, image_r, image_g, image_b):
"""
Return the total intensity from the red, blue, and green intensities.
This is a naive computation, and may be overridden by subclasses.
Parameters
----------
image_r : ndarray
Intensity of image to be mapped to red; or total intensity if
``image_g`` and ``image_b`` are None.
image_g : ndarray, optional
Intensity of image to be mapped to green.
image_b : ndarray, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : ndarray
Total intensity from the red, blue and green intensities, or
``image_r`` if green and blue images are not provided.
"""
return compute_intensity(image_r, image_g, image_b)
def map_intensity_to_uint8(self, I):
"""
Return an array which, when multiplied by an image, returns that image
mapped to the range of a uint8, [0, 255] (but not converted to uint8).
The intensity is assumed to have had minimum subtracted (as that can be
done per-band).
Parameters
----------
I : ndarray
Intensity to be mapped.
Returns
-------
mapped_I : ndarray
``I`` mapped to uint8
"""
with np.errstate(invalid="ignore", divide="ignore"):
return np.clip(I, 0, self._uint8Max)
def _convert_images_to_uint8(self, image_r, image_g, image_b):
"""
Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images.
"""
image_r = image_r - self.minimum[0] # n.b. makes copy
image_g = image_g - self.minimum[1]
image_b = image_b - self.minimum[2]
fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b))
image_rgb = [image_r, image_g, image_b]
for c in image_rgb:
c *= fac
with np.errstate(invalid="ignore"):
c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't
pixmax = self._uint8Max
# copies -- could work row by row to minimise memory usage
r0, g0, b0 = image_rgb
# n.b. np.where can't and doesn't short-circuit
with np.errstate(invalid="ignore", divide="ignore"):
for i, c in enumerate(image_rgb):
c = np.where(
r0 > g0,
np.where(
r0 > b0,
np.where(r0 >= pixmax, c * pixmax / r0, c),
np.where(b0 >= pixmax, c * pixmax / b0, c),
),
np.where(
g0 > b0,
np.where(g0 >= pixmax, c * pixmax / g0, c),
np.where(b0 >= pixmax, c * pixmax / b0, c),
),
).astype(np.uint8)
c[c > pixmax] = pixmax
image_rgb[i] = c
return image_rgb
| Mapping |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/service/server_lib.py | {
"start": 5360,
"end": 10942
} | class ____:
"""An in-process tf.data service dispatch server.
A `tf.data.experimental.service.DispatchServer` coordinates a cluster of
`tf.data.experimental.service.WorkerServer`s. When the workers start, they
register themselves with the dispatcher.
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> worker = tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address))
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="parallel_epochs", service=dispatcher.target))
>>> [a.item() for a in dataset.as_numpy_iterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
When starting a dedicated tf.data dispatch process, use join() to block
after starting up the server, until the server terminates.
```
dispatcher = tf.data.experimental.service.DispatchServer(
tf.data.experimental.service.DispatcherConfig(port=5050))
dispatcher.join()
```
Call stop() to gracefully terminate the dispatcher. The server automatically
stops when all reference to it have been deleted.
To start a `DispatchServer` in fault-tolerant mode, set `work_dir` and
`fault_tolerant_mode` like below:
```
dispatcher = tf.data.experimental.service.DispatchServer(
tf.data.experimental.service.DispatcherConfig(
port=5050,
work_dir="gs://my-bucket/dispatcher/work_dir",
fault_tolerant_mode=True))
```
"""
def __init__(self, config=None, start=True):
"""Creates a new dispatch server.
Args:
config: (Optional.) A `tf.data.experimental.service.DispatcherConfig`
configuration. If `None`, the dispatcher will use default configuration
values.
start: (Optional.) Boolean, indicating whether to start the server after
creating it. Defaults to True.
"""
config = config or DispatcherConfig()
if config.fault_tolerant_mode and not config.work_dir:
raise ValueError(
"Cannot enable fault tolerant mode without configuring a work dir. "
"Make sure to set `work_dir` in the `config` object passed to "
"`DispatcherServer`.")
self._config = config
if isinstance(config, service_config_pb2.DispatcherConfig):
config_proto = config
else:
config_proto = service_config_pb2.DispatcherConfig(
port=config.port,
protocol=config.protocol,
work_dir=config.work_dir,
fault_tolerant_mode=config.fault_tolerant_mode,
worker_addresses=config.worker_addresses,
job_gc_check_interval_ms=config.job_gc_check_interval_ms,
job_gc_timeout_ms=config.job_gc_timeout_ms,
worker_timeout_ms=config.worker_timeout_ms,
worker_max_concurrent_snapshots=config.worker_max_concurrent_snapshots
)
self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(
config_proto.SerializeToString())
if start:
self._server.start()
def start(self):
"""Starts this server.
>>> dispatcher = tf.data.experimental.service.DispatchServer(start=False)
>>> dispatcher.start()
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
starting the server.
"""
self._server.start()
def join(self) -> None:
"""Blocks until the server has shut down.
This is useful when starting a dedicated dispatch process.
```
dispatcher = tf.data.experimental.service.DispatchServer(
tf.data.experimental.service.DispatcherConfig(port=5050))
dispatcher.join()
```
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
joining the server.
"""
self._server.join()
def stop(self) -> None:
"""Stops the server.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
stopping the server.
"""
self._stop()
@property
def target(self) -> str:
"""Returns a target that can be used to connect to the server.
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="parallel_epochs", service=dispatcher.target))
The returned string will be in the form protocol://address, e.g.
"grpc://localhost:5050".
"""
return "{0}://localhost:{1}".format(self._config.protocol,
self._server.bound_port())
def _stop(self) -> None:
"""Stops the server.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
stopping the server.
"""
self._server.stop()
def __del__(self) -> None:
self._stop()
@property
def _address(self) -> str:
"""Returns the address of the server.
The returned string will be in the form address:port, e.g. "localhost:1000".
"""
return "localhost:{0}".format(self._server.bound_port())
def _num_workers(self) -> int:
"""Returns the number of workers registered with the dispatcher."""
return self._server.num_workers()
def _snapshot_streams(
self, path) -> Iterable[_pywrap_server_lib.SnapshotStreamInfoWrapper]:
"""Returns information about all the streams for a snapshot."""
return self._server.snapshot_streams(path)
@tf_export("data.experimental.service.WorkerConfig")
| DispatchServer |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-of-subsequences.py | {
"start": 67,
"end": 799
} | class ____(object):
def maxXorSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def max_xor_subset(nums): # Time: O(nlogr)
base = [0]*l
for x in nums: # gaussian elimination over GF(2)
for b in base:
if x^b < x:
x ^= b
if x:
base.append(x)
max_xor = 0
for b in base: # greedy
if (max_xor^b) > max_xor:
max_xor ^= b
return max_xor
l = max(nums).bit_length()
return max_xor_subset(nums)
# Time: O(nlogr), r = max(nums)
# Space: O(r)
# bitmasks, greedy
| Solution |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 7738,
"end": 8038
} | class ____:
target_E = 0
solution = [0., 0.]
xmin = np.array([-100., -100])
xmax = np.array([100., 100])
def fun(self, x):
num = np.power(np.sin(x[0]**2 - x[1]**2), 2) - 0.5
den = np.power(1 + 0.001 * (x[0]**2 + x[1]**2), 2)
return 0.5 + num / den
| Schaffer2 |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 64629,
"end": 65271
} | class ____(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
@deprecated_renamed_argument("use_min_max_bounds", None, "7.0")
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("dogbox", calc_uncertainties, use_min_max_bounds)
| DogBoxLSQFitter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1589851,
"end": 1592555
} | class ____(sgqlc.types.Union):
"""An audit entry in an organization audit log."""
__schema__ = github_schema
__types__ = (
MembersCanDeleteReposClearAuditEntry,
MembersCanDeleteReposDisableAuditEntry,
MembersCanDeleteReposEnableAuditEntry,
OauthApplicationCreateAuditEntry,
OrgAddBillingManagerAuditEntry,
OrgAddMemberAuditEntry,
OrgBlockUserAuditEntry,
OrgConfigDisableCollaboratorsOnlyAuditEntry,
OrgConfigEnableCollaboratorsOnlyAuditEntry,
OrgCreateAuditEntry,
OrgDisableOauthAppRestrictionsAuditEntry,
OrgDisableSamlAuditEntry,
OrgDisableTwoFactorRequirementAuditEntry,
OrgEnableOauthAppRestrictionsAuditEntry,
OrgEnableSamlAuditEntry,
OrgEnableTwoFactorRequirementAuditEntry,
OrgInviteMemberAuditEntry,
OrgInviteToBusinessAuditEntry,
OrgOauthAppAccessApprovedAuditEntry,
OrgOauthAppAccessDeniedAuditEntry,
OrgOauthAppAccessRequestedAuditEntry,
OrgRemoveBillingManagerAuditEntry,
OrgRemoveMemberAuditEntry,
OrgRemoveOutsideCollaboratorAuditEntry,
OrgRestoreMemberAuditEntry,
OrgUnblockUserAuditEntry,
OrgUpdateDefaultRepositoryPermissionAuditEntry,
OrgUpdateMemberAuditEntry,
OrgUpdateMemberRepositoryCreationPermissionAuditEntry,
OrgUpdateMemberRepositoryInvitationPermissionAuditEntry,
PrivateRepositoryForkingDisableAuditEntry,
PrivateRepositoryForkingEnableAuditEntry,
RepoAccessAuditEntry,
RepoAddMemberAuditEntry,
RepoAddTopicAuditEntry,
RepoArchivedAuditEntry,
RepoChangeMergeSettingAuditEntry,
RepoConfigDisableAnonymousGitAccessAuditEntry,
RepoConfigDisableCollaboratorsOnlyAuditEntry,
RepoConfigDisableContributorsOnlyAuditEntry,
RepoConfigDisableSockpuppetDisallowedAuditEntry,
RepoConfigEnableAnonymousGitAccessAuditEntry,
RepoConfigEnableCollaboratorsOnlyAuditEntry,
RepoConfigEnableContributorsOnlyAuditEntry,
RepoConfigEnableSockpuppetDisallowedAuditEntry,
RepoConfigLockAnonymousGitAccessAuditEntry,
RepoConfigUnlockAnonymousGitAccessAuditEntry,
RepoCreateAuditEntry,
RepoDestroyAuditEntry,
RepoRemoveMemberAuditEntry,
RepoRemoveTopicAuditEntry,
RepositoryVisibilityChangeDisableAuditEntry,
RepositoryVisibilityChangeEnableAuditEntry,
TeamAddMemberAuditEntry,
TeamAddRepositoryAuditEntry,
TeamChangeParentTeamAuditEntry,
TeamRemoveMemberAuditEntry,
TeamRemoveRepositoryAuditEntry,
)
| OrganizationAuditEntry |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams1.py | {
"start": 1590,
"end": 1771
} | class ____: ...
def func9[T, **P, S](x: T) -> T:
S = 1
def inner():
# This should generate two errors.
nonlocal T, P
nonlocal S
return x
| ClassI3 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg2.py | {
"start": 19751,
"end": 19846
} | class ____(JSONB):
def result_processor(self, dialect, coltype):
return None
| _PGJSONB |
python | google__jax | tests/pallas/tpu_splash_attention_kernel_test.py | {
"start": 2838,
"end": 3774
} | class ____(Mask):
seq_len: int
left: int | None
right: int | None
offset: int
def get_mask(self) -> mask_lib.Mask:
mask = mask_lib.LocalMask(
(self.seq_len, self.seq_len),
(self.left, self.right),
offset=self.offset,
)
# Make sure that no row is full of zeros as this is leads to undefined
# softmax.
diagonal = mask_lib.NumpyMask(np.identity(self.seq_len, dtype=np.bool_))
return mask | diagonal
@hps.composite
def local_attention_mask_strategy(draw: Draw, seq_len: int) -> Mask:
left_window = draw(
hps.one_of(hps.none(), hps.integers(min_value=0, max_value=seq_len))
)
right_window = draw(
hps.one_of(hps.none(), hps.integers(min_value=0, max_value=seq_len))
)
offset = draw(hps.integers(min_value=-seq_len, max_value=seq_len - 1))
return LocalAttentionMask(seq_len, left_window, right_window, offset=offset)
@dataclasses.dataclass
| LocalAttentionMask |
python | apache__airflow | airflow-ctl/src/airflowctl/ctl/cli_parser.py | {
"start": 2915,
"end": 5766
} | class ____(RawTextRichHelpFormatter):
"""
Custom help formatter to display help message.
It resolves lazy help string before printing it using rich.
"""
def add_argument(self, action: Action) -> None:
if isinstance(action.help, lazy_object_proxy.Proxy):
action.help = str(action.help)
return super().add_argument(action)
def add_preview_action(parser: argparse.ArgumentParser) -> None:
"""Add preview action to parser."""
parser.add_argument(
"--preview",
action=HelpPreviewAction,
)
@cache
def get_parser() -> argparse.ArgumentParser:
"""Create and returns command line argument parser."""
parser = DefaultHelpParser(prog="airflowctl", formatter_class=AirflowHelpFormatter)
add_preview_action(parser)
subparsers = parser.add_subparsers(dest="subcommand", metavar="GROUP_OR_COMMAND")
subparsers.required = True
for _, sub in sorted(ALL_COMMANDS_DICT.items()):
_add_command(
subparsers, GroupCommandParser.from_group_command(sub) if isinstance(sub, GroupCommand) else sub
)
return parser
def _sort_args(args: Iterable[Arg]) -> Iterable[Arg]:
"""Sort subcommand optional args, keep positional args."""
def get_long_option(arg: Arg):
"""Get long option from Arg.flags."""
return arg.flags[0] if len(arg.flags) == 1 else arg.flags[1]
positional, optional = partition(lambda x: x.flags[0].startswith("-"), args)
yield from positional
yield from sorted(optional, key=lambda x: get_long_option(x).lower())
def _add_command(subparsers: argparse._SubParsersAction, sub: CLICommand) -> None:
if isinstance(sub, ActionCommand) and sub.hide:
sub_proc = subparsers.add_parser(sub.name, epilog=sub.epilog)
else:
sub_proc = subparsers.add_parser(
sub.name, help=sub.help, description=sub.description or sub.help, epilog=sub.epilog
)
add_preview_action(sub_proc)
sub_proc.formatter_class = LazyRichHelpFormatter
if isinstance(sub, GroupCommandParser):
_add_group_command(sub, sub_proc)
elif isinstance(sub, ActionCommand):
_add_action_command(sub, sub_proc)
else:
raise AirflowCtlException("Invalid command definition.")
def _add_action_command(sub: ActionCommand, sub_proc: argparse.ArgumentParser) -> None:
for arg in _sort_args(sub.args):
arg.add_to_parser(sub_proc)
sub_proc.set_defaults(func=sub.func)
def _add_group_command(sub: GroupCommandParser, sub_proc: argparse.ArgumentParser) -> None:
subcommands = sub.subcommands
sub_subparsers = sub_proc.add_subparsers(dest="subcommand", metavar="COMMAND")
sub_subparsers.required = True
for command in sorted(subcommands, key=lambda x: x.name):
_add_command(sub_subparsers, command)
| LazyRichHelpFormatter |
python | bokeh__bokeh | src/bokeh/protocol/messages/pull_doc_req.py | {
"start": 1424,
"end": 2321
} | class ____(Message[Empty]):
''' Define the ``PULL-DOC-REQ`` message for requesting a Bokeh server reply
with a new Bokeh Document.
The ``content`` fragment of for this message is empty.
'''
msgtype = 'PULL-DOC-REQ'
@classmethod
def create(cls, **metadata: Any) -> pull_doc_req:
''' Create an ``PULL-DOC-REQ`` message
Any keyword arguments will be put into the message ``metadata``
fragment as-is.
'''
header = cls.create_header()
return cls(header, metadata, Empty())
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| pull_doc_req |
python | kubernetes-client__python | kubernetes/client/models/v1_api_service_list.py | {
"start": 383,
"end": 6897
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1APIService]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1APIServiceList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1APIServiceList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1APIServiceList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1APIServiceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1APIServiceList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1APIServiceList. # noqa: E501
Items is the list of APIService # noqa: E501
:return: The items of this V1APIServiceList. # noqa: E501
:rtype: list[V1APIService]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1APIServiceList.
Items is the list of APIService # noqa: E501
:param items: The items of this V1APIServiceList. # noqa: E501
:type: list[V1APIService]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1APIServiceList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1APIServiceList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1APIServiceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1APIServiceList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1APIServiceList. # noqa: E501
:return: The metadata of this V1APIServiceList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1APIServiceList.
:param metadata: The metadata of this V1APIServiceList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1APIServiceList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1APIServiceList):
return True
return self.to_dict() != other.to_dict()
| V1APIServiceList |
python | doocs__leetcode | solution/1600-1699/1679.Max Number of K-Sum Pairs/Solution2.py | {
"start": 0,
"end": 289
} | class ____:
def maxOperations(self, nums: List[int], k: int) -> int:
cnt = Counter()
ans = 0
for x in nums:
if cnt[k - x]:
ans += 1
cnt[k - x] -= 1
else:
cnt[x] += 1
return ans
| Solution |
python | huggingface__transformers | tests/models/zamba/test_modeling_zamba.py | {
"start": 18819,
"end": 22884
} | class ____(unittest.TestCase):
model = None
tokenizer = None
@classmethod
@slow
def setUpClass(cls):
model_id = "Zyphra/Zamba-7B-v1"
cls.model = ZambaForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, use_mamba_kernels=False)
cls.tokenizer = AutoTokenizer.from_pretrained(model_id)
@slow
def test_simple_generate(self):
self.model.to(torch_device)
input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[
"input_ids"
].to(torch_device)
out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10)
output_sentence = self.tokenizer.decode(out[0, :])
self.assertEqual(
output_sentence,
"<s> Hey how are you doing on this lovely evening? I hope you are all doing well. I am",
)
with torch.no_grad():
logits = self.model(input_ids=input_ids).logits
EXPECTED_LOGITS_NO_GRAD = torch.tensor(
[
-7.9375, 8.1875, 1.3984, -6.0000, -7.9375, -7.9375, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, 2.7500, 13.0625, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375
]
, dtype=torch.float32) # fmt: skip
torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3)
@slow
def test_simple_batched_generate_with_padding(self):
self.model.to(torch_device)
self.tokenizer.add_special_tokens({"pad_token": "[PAD]"})
self.model.resize_token_embeddings(len(self.tokenizer))
inputs = self.tokenizer(
["Hey how are you doing on this lovely evening?", "Tell me a story"], padding=True, return_tensors="pt"
).to(torch_device)
out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10)
output_sentences = self.tokenizer.batch_decode(out)
self.assertEqual(
output_sentences[0],
"<s> Hey how are you doing on this lovely evening? I hope you are all doing well. I am",
)
self.assertEqual(
output_sentences[1],
"[PAD][PAD][PAD][PAD][PAD][PAD]<s> Tell me a story about a time when you were in a difficult situation",
)
with torch.no_grad():
logits = self.model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]).logits
EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor(
[
-7.9375, 8.1250, 1.3594, -6.0000, -7.9375, -7.9375, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, 2.7344, 13.0625, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375,
-7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375
]
, dtype=torch.float32) # fmt: skip
EXPECTED_LOGITS_NO_GRAD_1 = torch.tensor(
[
-6.3750, 3.4219, 0.6719, -5.0312, -8.5000, -8.5000, -8.5000, -8.5000,
-8.5000, -8.5000, -8.5000, -8.5000, 2.0625, 10.3750, -8.5000, -8.5000,
-8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000,
-8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000,
-8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000
]
, dtype=torch.float32) # fmt: skip
torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(logits[1, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_1, rtol=1e-3, atol=1e-3)
| ZambaModelIntegrationTest |
python | pyca__cryptography | tests/hazmat/primitives/test_concatkdf.py | {
"start": 5323,
"end": 12315
} | class ____:
def test_length_limit(self, backend):
big_length = hashes.SHA256().digest_size * (2**32 - 1) + 1
error = OverflowError if sys.maxsize <= 2**31 else ValueError
with pytest.raises(error):
ConcatKDFHMAC(hashes.SHA256(), big_length, None, None, backend)
def test_already_finalized(self, backend):
ckdf = ConcatKDFHMAC(hashes.SHA256(), 16, None, None, backend)
ckdf.derive(b"\x01" * 16)
with pytest.raises(AlreadyFinalized):
ckdf.derive(b"\x02" * 16)
def test_derive(self, backend):
prk = binascii.unhexlify(
b"013951627c1dea63ea2d7702dd24e963eef5faac6b4af7e4"
b"b831cde499dff1ce45f6179f741c728aa733583b02409208"
b"8f0af7fce1d045edbc5790931e8d5ca79c73"
)
okm = binascii.unhexlify(
b"64ce901db10d558661f10b6836a122a7605323ce2f39bf27eaaac8b34cf89f2f"
)
oinfo = binascii.unhexlify(
b"a1b2c3d4e55e600be5f367e0e8a465f4bf2704db00c9325c"
b"9fbd216d12b49160b2ae5157650f43415653696421e68e"
)
ckdf = ConcatKDFHMAC(hashes.SHA512(), 32, None, oinfo, backend)
assert ckdf.derive(prk) == okm
def test_buffer_protocol(self, backend):
prk = binascii.unhexlify(
b"013951627c1dea63ea2d7702dd24e963eef5faac6b4af7e4"
b"b831cde499dff1ce45f6179f741c728aa733583b02409208"
b"8f0af7fce1d045edbc5790931e8d5ca79c73"
)
okm = binascii.unhexlify(
b"64ce901db10d558661f10b6836a122a7605323ce2f39bf27eaaac8b34cf89f2f"
)
oinfo = binascii.unhexlify(
b"a1b2c3d4e55e600be5f367e0e8a465f4bf2704db00c9325c"
b"9fbd216d12b49160b2ae5157650f43415653696421e68e"
)
ckdf = ConcatKDFHMAC(hashes.SHA512(), 32, None, oinfo, backend)
assert ckdf.derive(bytearray(prk)) == okm
def test_derive_explicit_salt(self, backend):
prk = binascii.unhexlify(
b"013951627c1dea63ea2d7702dd24e963eef5faac6b4af7e4"
b"b831cde499dff1ce45f6179f741c728aa733583b02409208"
b"8f0af7fce1d045edbc5790931e8d5ca79c73"
)
okm = binascii.unhexlify(
b"64ce901db10d558661f10b6836a122a7605323ce2f39bf27eaaac8b34cf89f2f"
)
oinfo = binascii.unhexlify(
b"a1b2c3d4e55e600be5f367e0e8a465f4bf2704db00c9325c"
b"9fbd216d12b49160b2ae5157650f43415653696421e68e"
)
ckdf = ConcatKDFHMAC(
hashes.SHA512(), 32, b"\x00" * 128, oinfo, backend
)
assert ckdf.derive(prk) == okm
def test_verify(self, backend):
prk = binascii.unhexlify(
b"013951627c1dea63ea2d7702dd24e963eef5faac6b4af7e4"
b"b831cde499dff1ce45f6179f741c728aa733583b02409208"
b"8f0af7fce1d045edbc5790931e8d5ca79c73"
)
okm = binascii.unhexlify(
b"64ce901db10d558661f10b6836a122a7605323ce2f39bf27eaaac8b34cf89f2f"
)
oinfo = binascii.unhexlify(
b"a1b2c3d4e55e600be5f367e0e8a465f4bf2704db00c9325c"
b"9fbd216d12b49160b2ae5157650f43415653696421e68e"
)
ckdf = ConcatKDFHMAC(hashes.SHA512(), 32, None, oinfo, backend)
ckdf.verify(prk, okm)
def test_invalid_verify(self, backend):
prk = binascii.unhexlify(
b"013951627c1dea63ea2d7702dd24e963eef5faac6b4af7e4"
b"b831cde499dff1ce45f6179f741c728aa733583b02409208"
b"8f0af7fce1d045edbc5790931e8d5ca79c73"
)
oinfo = binascii.unhexlify(
b"a1b2c3d4e55e600be5f367e0e8a465f4bf2704db00c9325c"
b"9fbd216d12b49160b2ae5157650f43415653696421e68e"
)
ckdf = ConcatKDFHMAC(hashes.SHA512(), 32, None, oinfo, backend)
with pytest.raises(InvalidKey):
ckdf.verify(prk, b"wrong key")
def test_unicode_typeerror(self, backend):
with pytest.raises(TypeError):
ConcatKDFHMAC(
hashes.SHA256(),
16,
salt="foo", # type: ignore[arg-type]
otherinfo=None,
backend=backend,
)
with pytest.raises(TypeError):
ConcatKDFHMAC(
hashes.SHA256(),
16,
salt=None,
otherinfo="foo", # type: ignore[arg-type]
backend=backend,
)
with pytest.raises(TypeError):
ckdf = ConcatKDFHMAC(
hashes.SHA256(), 16, salt=None, otherinfo=None, backend=backend
)
ckdf.derive("foo") # type: ignore[arg-type]
with pytest.raises(TypeError):
ckdf = ConcatKDFHMAC(
hashes.SHA256(), 16, salt=None, otherinfo=None, backend=backend
)
ckdf.verify("foo", b"bar") # type: ignore[arg-type]
with pytest.raises(TypeError):
ckdf = ConcatKDFHMAC(
hashes.SHA256(), 16, salt=None, otherinfo=None, backend=backend
)
ckdf.verify(b"foo", "bar") # type: ignore[arg-type]
def test_unsupported_hash_algorithm(self, backend):
# ConcatKDF requires a hash algorithm with an internal block size.
with pytest.raises(TypeError):
ConcatKDFHMAC(
hashes.SHA3_256(),
16,
salt=None,
otherinfo=None,
backend=backend,
)
def test_derive_into(self, backend):
prk = binascii.unhexlify(
b"013951627c1dea63ea2d7702dd24e963eef5faac6b4af7e4"
b"b831cde499dff1ce45f6179f741c728aa733583b02409208"
b"8f0af7fce1d045edbc5790931e8d5ca79c73"
)
oinfo = binascii.unhexlify(
b"a1b2c3d4e55e600be5f367e0e8a465f4bf2704db00c9325c"
b"9fbd216d12b49160b2ae5157650f43415653696421e68e"
)
ckdf = ConcatKDFHMAC(hashes.SHA512(), 32, None, oinfo, backend)
buf = bytearray(32)
n = ckdf.derive_into(prk, buf)
assert n == 32
# Verify the output matches what derive would produce
ckdf2 = ConcatKDFHMAC(hashes.SHA512(), 32, None, oinfo, backend)
expected = ckdf2.derive(prk)
assert buf == expected
@pytest.mark.parametrize(
("buflen", "outlen"), [(31, 32), (33, 32), (16, 32), (64, 32)]
)
def test_derive_into_buffer_incorrect_size(self, buflen, outlen, backend):
ckdf = ConcatKDFHMAC(hashes.SHA512(), outlen, None, None, backend)
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
ckdf.derive_into(b"key", buf)
def test_derive_into_already_finalized(self, backend):
ckdf = ConcatKDFHMAC(hashes.SHA512(), 32, None, None, backend)
buf = bytearray(32)
ckdf.derive_into(b"key", buf)
with pytest.raises(AlreadyFinalized):
ckdf.derive_into(b"key", buf)
| TestConcatKDFHMAC |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 336329,
"end": 337010
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseServerUserAccountEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseServerUserAccount"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| EnterpriseServerUserAccountConnection |
python | numba__numba | numba/tests/test_analysis.py | {
"start": 5160,
"end": 19917
} | class ____(TestBranchPruneBase, SerialMixin):
def test_single_if(self):
def impl(x):
if 1 == 0:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
def impl(x):
if 1 == 1:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
def impl(x):
if x is None:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)
def impl(x):
if x == 10:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
def impl(x):
if x == 10:
z = 3.14159 # noqa: F841 # no effect
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
def impl(x):
z = None
y = z
if x == y:
return 100
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)
def test_single_if_else(self):
def impl(x):
if x is None:
return 3.14159
else:
return 1.61803
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)
def test_single_if_const_val(self):
def impl(x):
if x == 100:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)
def impl(x):
# switch the condition order
if 100 == x:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)
def test_single_if_else_two_const_val(self):
def impl(x, y):
if x == y:
return 3.14159
else:
return 1.61803
self.assert_prune(impl, (types.IntegerLiteral(100),) * 2, [None], 100,
100)
self.assert_prune(impl, (types.NoneType('none'),) * 2, [False], None,
None)
self.assert_prune(impl, (types.IntegerLiteral(100),
types.NoneType('none'),), [True], 100, None)
self.assert_prune(impl, (types.IntegerLiteral(100),
types.IntegerLiteral(1000)), [None], 100, 1000)
def test_single_if_else_w_following_undetermined(self):
def impl(x):
x_is_none_work = False
if x is None:
x_is_none_work = True
else:
dead = 7 # noqa: F841 # no effect
if x_is_none_work:
y = 10
else:
y = -3
return y
self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)
def impl(x):
x_is_none_work = False
if x is None:
x_is_none_work = True
else:
pass
if x_is_none_work:
y = 10
else:
y = -3
return y
# Python 3.10 creates a block with a NOP in it for the `pass` which
# means it gets pruned.
self.assert_prune(impl, (types.NoneType('none'),), [False, None],
None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)
def test_double_if_else_rt_const(self):
def impl(x):
one_hundred = 100
x_is_none_work = 4
if x is None:
x_is_none_work = 100
else:
dead = 7 # noqa: F841 # no effect
if x_is_none_work == one_hundred:
y = 10
else:
y = -3
return y, x_is_none_work
self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)
def test_double_if_else_non_literal_const(self):
def impl(x):
one_hundred = 100
if x == one_hundred:
y = 3.14159
else:
y = 1.61803
return y
# no prune as compilation specialization on literal value not permitted
self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)
def test_single_two_branches_same_cond(self):
def impl(x):
if x is None:
y = 10
else:
y = 40
if x is not None:
z = 100
else:
z = 400
return z, y
self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)
def test_cond_is_kwarg_none(self):
def impl(x=None):
if x is None:
y = 10
else:
y = 40
if x is not None:
z = 100
else:
z = 400
return z, y
self.assert_prune(impl, (types.Omitted(None),),
[False, True], None)
self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)
def test_cond_is_kwarg_value(self):
def impl(x=1000):
if x == 1000:
y = 10
else:
y = 40
if x != 1000:
z = 100
else:
z = 400
return z, y
self.assert_prune(impl, (types.Omitted(1000),), [None, None], 1000)
self.assert_prune(impl, (types.IntegerLiteral(1000),), [None, None],
1000)
self.assert_prune(impl, (types.IntegerLiteral(0),), [None, None], 0)
self.assert_prune(impl, (types.NoneType('none'),), [True, False], None)
def test_cond_rewrite_is_correct(self):
# this checks that when a condition is replaced, it is replace by a
# true/false bit that correctly represents the evaluated condition
def fn(x):
if x is None:
return 10
return 12
def check(func, arg_tys, bit_val):
func_ir = compile_to_ir(func)
# check there is 1 branch
before_branches = self.find_branches(func_ir)
self.assertEqual(len(before_branches), 1)
# check the condition in the branch is a binop
pred_var = before_branches[0].cond
pred_defn = ir_utils.get_definition(func_ir, pred_var)
self.assertEqual(pred_defn.op, 'call')
condition_var = pred_defn.args[0]
condition_op = ir_utils.get_definition(func_ir, condition_var)
self.assertEqual(condition_op.op, 'binop')
# do the prune, this should kill the dead branch and rewrite the
#'condition to a true/false const bit
if self._DEBUG:
print("=" * 80)
print("before prune")
func_ir.dump()
dead_branch_prune(func_ir, arg_tys)
if self._DEBUG:
print("=" * 80)
print("after prune")
func_ir.dump()
# after mutation, the condition should be a const value `bit_val`
new_condition_defn = ir_utils.get_definition(func_ir, condition_var)
self.assertTrue(isinstance(new_condition_defn, ir.Const))
self.assertEqual(new_condition_defn.value, bit_val)
check(fn, (types.NoneType('none'),), 1)
check(fn, (types.IntegerLiteral(10),), 0)
def test_global_bake_in(self):
def impl(x):
if _GLOBAL == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [False], 1)
global _GLOBAL
tmp = _GLOBAL
try:
_GLOBAL = 5
def impl(x):
if _GLOBAL == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [True], 1)
finally:
_GLOBAL = tmp
def test_freevar_bake_in(self):
_FREEVAR = 123
def impl(x):
if _FREEVAR == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [False], 1)
_FREEVAR = 12
def impl(x):
if _FREEVAR == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [True], 1)
def test_redefined_variables_are_not_considered_in_prune(self):
# see issue #4163, checks that if a variable that is an argument is
# redefined in the user code it is not considered const
def impl(array, a=None):
if a is None:
a = 0
if a < 0:
return 10
return 30
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'),),
[None, None],
np.zeros((2, 3)), None)
def test_comparison_operators(self):
# see issue #4163, checks that a variable that is an argument and has
# value None survives TypeError from invalid comparison which should be
# dead
def impl(array, a=None):
x = 0
if a is None:
return 10 # dynamic exec would return here
# static analysis requires that this is executed with a=None,
# hence TypeError
if a < 0:
return 20
return x
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'),),
[False, 'both'],
np.zeros((2, 3)), None)
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64,),
[None, None],
np.zeros((2, 3)), 12.)
def test_redefinition_analysis_same_block(self):
# checks that a redefinition in a block with prunable potential doesn't
# break
def impl(array, x, a=None):
b = 2
if x < 4:
b = 12
if a is None: # known true
a = 7 # live
else:
b = 15 # dead
if a < 0: # valid as a result of the redefinition of 'a'
return 10
return 30 + b + a
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64, types.NoneType('none'),),
[None, False, None],
np.zeros((2, 3)), 1., None)
def test_redefinition_analysis_different_block_can_exec(self):
# checks that a redefinition in a block that may be executed prevents
# pruning
def impl(array, x, a=None):
b = 0
if x > 5:
a = 11 # a redefined, cannot tell statically if this will exec
if x < 4:
b = 12
if a is None: # cannot prune, cannot determine if re-defn occurred
b += 5
else:
b += 7
if a < 0:
return 10
return 30 + b
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64, types.NoneType('none'),),
[None, None, None, None],
np.zeros((2, 3)), 1., None)
def test_redefinition_analysis_different_block_cannot_exec(self):
# checks that a redefinition in a block guarded by something that
# has prune potential
def impl(array, x=None, a=None):
b = 0
if x is not None:
a = 11
if a is None:
b += 5
else:
b += 7
return 30 + b
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'), types.NoneType('none')),
[True, None],
np.zeros((2, 3)), None, None)
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'), types.float64),
[True, None],
np.zeros((2, 3)), None, 1.2)
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64, types.NoneType('none')),
[None, None],
np.zeros((2, 3)), 1.2, None)
def test_closure_and_nonlocal_can_prune(self):
# Closures must be inlined ahead of branch pruning in case nonlocal
# is used. See issue #6585.
def impl():
x = 1000
def closure():
nonlocal x
x = 0
closure()
if x == 0:
return True
else:
return False
self.assert_prune(impl, (), [False,],)
def test_closure_and_nonlocal_cannot_prune(self):
# Closures must be inlined ahead of branch pruning in case nonlocal
# is used. See issue #6585.
def impl(n):
x = 1000
def closure(t):
nonlocal x
x = t
closure(n)
if x == 0:
return True
else:
return False
self.assert_prune(impl, (types.int64,), [None,], 1)
| TestBranchPrune |
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 6599,
"end": 6823
} | class ____(AbstractTemplate):
key = "StringView.replace"
def generic(self, args, kws):
return nb_signature(
managed_udf_string, string_view, string_view, recvr=self.this
)
| StringViewReplace |
python | rapidsai__cudf | python/cudf/cudf/core/buffer/buffer.py | {
"start": 7395,
"end": 14185
} | class ____(Serializable):
"""A buffer that represents a slice or view of a `BufferOwner`.
Use the factory function `as_buffer` to create a Buffer instance.
Note
----
This buffer is untyped, so all indexing and sizes are in bytes.
Parameters
----------
owner
The owning exposure buffer this refers to.
offset
The offset relative to the start memory of owner (in bytes).
size
The size of the buffer (in bytes). If None, use the size of owner.
"""
def __init__(
self,
*,
owner: BufferOwner,
offset: int = 0,
size: int | None = None,
) -> None:
size = owner.size if size is None else size
if size < 0:
raise ValueError("size cannot be negative")
if offset < 0:
raise ValueError("offset cannot be negative")
if offset + size > owner.size:
raise ValueError(
"offset+size cannot be greater than the size of owner"
)
self._owner = owner
self._offset = offset
self._size = size
@property
def size(self) -> int:
"""Size of the buffer in bytes."""
return self._size
@property
def nbytes(self) -> int:
"""Size of the buffer in bytes."""
return self._size
@property
def owner(self) -> BufferOwner:
"""Object owning the memory of the buffer."""
return self._owner
def __getitem__(self, key: slice) -> Self:
"""Create a new slice of the buffer."""
if not isinstance(key, slice):
raise TypeError(
"Argument 'key' has incorrect type "
f"(expected slice, got {key.__class__.__name__})"
)
start, stop, step = key.indices(self.size)
if step != 1:
raise ValueError("slice must be C-contiguous")
return self.__class__(
owner=self._owner, offset=self._offset + start, size=stop - start
)
def get_ptr(self, *, mode: Literal["read", "write"]) -> int:
return self._owner.get_ptr(mode=mode) + self._offset
def memoryview(self) -> memoryview:
return self._owner.memoryview(offset=self._offset, size=self._size)
def copy(self, deep: bool = True) -> Self:
"""Return a copy of Buffer.
Parameters
----------
deep : bool, default True
- If deep=True, returns a deep copy of the underlying data.
- If deep=False, returns a new `Buffer` instance that refers
to the same `BufferOwner` as this one. Thus, no device
data are being copied.
Returns
-------
Buffer
A new buffer that either refers to either a new or an existing
`BufferOwner` depending on the `deep` argument (see above).
"""
# When doing a shallow copy, we just return a new slice
if not deep:
return self.__class__(
owner=self._owner, offset=self._offset, size=self._size
)
# Otherwise, we create a new copy of the memory
owner = type(self._owner).from_device_memory(
rmm.DeviceBuffer(
ptr=self._owner.get_ptr(mode="read") + self._offset,
size=self.size,
),
exposed=False,
)
return self.__class__(owner=owner, offset=0, size=owner.size)
@property
def __cuda_array_interface__(self) -> Mapping:
"""Implementation of the CUDA Array Interface."""
return {
"data": (self.get_ptr(mode="write"), False),
"shape": (self.size,),
"strides": None,
"typestr": "|u1",
"version": 0,
}
def serialize(self) -> tuple[dict, list]:
"""Serialize the buffer into header and frames.
The frames can be a mixture of memoryview, Buffer, and BufferOwner
objects.
Returns
-------
Tuple[dict, List]
The first element of the returned tuple is a dict containing any
serializable metadata required to reconstruct the object. The
second element is a list containing single frame.
"""
header: dict[str, Any] = {}
header["owner-type-serialized-name"] = type(self._owner).__name__
header["frame_count"] = 1
frames = [self]
return header, frames
@classmethod
def deserialize(cls, header: dict, frames: list) -> Self:
"""Create an Buffer from a serialized representation.
Parameters
----------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffer and memoryview that makes up the Buffer.
Returns
-------
Buffer
The deserialized Buffer.
"""
if header["frame_count"] != 1:
raise ValueError("Deserializing a Buffer expect a single frame")
frame = frames[0]
if isinstance(frame, cls):
return frame # The frame is already deserialized
owner_type: BufferOwner = Serializable._name_type_map[
header["owner-type-serialized-name"]
]
if hasattr(frame, "__cuda_array_interface__"):
owner = owner_type.from_device_memory(frame, exposed=False)
else:
owner = owner_type.from_host_memory(frame)
return cls(
owner=owner,
offset=0,
size=owner.size,
)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}(owner={self._owner!r}, "
f"offset={self._offset!r}, size={self._size!r})"
)
def __str__(self) -> str:
return (
f"<{self.__class__.__name__} size={format_bytes(self._size)} "
f"offset={format_bytes(self._offset)} of {self._owner}>"
)
def get_ptr_and_size(array_interface: Mapping) -> tuple[int, int]:
"""Retrieve the pointer and size from an array interface.
Raises ValueError if array isn't C-contiguous.
Parameters
----------
array_interface : Mapping
The array interface metadata.
Return
------
pointer : int
The pointer to device or host memory
size : int
The size in bytes
"""
shape = array_interface["shape"] or (1,)
strides = array_interface["strides"]
itemsize = numpy.dtype(array_interface["typestr"]).itemsize
if strides is None or pylibcudf.column.is_c_contiguous(
shape, strides, itemsize
):
nelem = math.prod(shape)
ptr = array_interface["data"][0] or 0
return ptr, nelem * itemsize
raise ValueError("Buffer data must be C-contiguous")
| Buffer |
python | cython__cython | Cython/Compiler/Optimize.py | {
"start": 4850,
"end": 50284
} | class ____(Visitor.EnvTransform):
"""Transform some common for-in loop patterns into efficient C loops:
- for-in-dict loop becomes a while loop calling PyDict_Next()
- for-in-enumerate is replaced by an external counter variable
- for-in-range loop becomes a plain C for loop
"""
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
pos = node.pos
result_ref = UtilNodes.ResultRefNode(node)
if node.operand2.is_subscript:
base_type = node.operand2.base.type.base_type
else:
base_type = node.operand2.type.base_type
target_handle = UtilNodes.TempHandle(base_type)
target = target_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator='==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=True)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
pos,
if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
else_clause=None)
for_loop = UtilNodes.TempsBlockNode(
pos,
temps = [target_handle],
body = Nodes.ForInStatNode(
pos,
target=target,
iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
body=if_node,
else_clause=Nodes.SingleAssignmentNode(
pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=False))))
for_loop = for_loop.analyse_expressions(self.current_env())
for_loop = self.visit(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
else:
self.visitchildren(node)
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
def _optimise_for_loop(self, node, iterable, reversed=False):
annotation_type = None
if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
annotation = iterable.entry.annotation.expr
if annotation.is_subscript:
annotation = annotation.base # container base type
if Builtin.dict_type in (iterable.type, annotation_type):
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
node, dict_obj=iterable, method=None, keys=True, values=False)
if (Builtin.set_type in (iterable.type, annotation_type) or
Builtin.frozenset_type in (iterable.type, annotation_type)):
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_set_iteration(node, iterable)
# C array (slice) iteration?
if iterable.type.is_ptr or iterable.type.is_array:
return self._transform_carray_iteration(node, iterable, reversed=reversed)
if iterable.is_sequence_constructor:
# Convert iteration over homogeneous sequences of C types into array iteration.
env = self.current_env()
item_type = ExprNodes.infer_sequence_item_type(
env, iterable, seq_type=iterable.type)
if item_type and not item_type.is_pyobject and not any(item.is_starred for item in iterable.args):
iterable = ExprNodes.ListNode(iterable.pos, args=iterable.args).analyse_types(env).coerce_to(
PyrexTypes.c_array_type(item_type, len(iterable.args)), env)
return self._transform_carray_iteration(node, iterable, reversed=reversed)
if iterable.is_string_literal:
# Iterate over C array of single character values.
env = self.current_env()
if iterable.type is Builtin.unicode_type:
item_type = PyrexTypes.c_py_ucs4_type
items = map(ord, iterable.value)
else:
item_type = PyrexTypes.c_uchar_type
items = iterable.value
as_int_node = partial(ExprNodes.IntNode.for_int, iterable.pos, type=item_type)
iterable = ExprNodes.ListNode(iterable.pos, args=[as_int_node(ch)for ch in items])
iterable = iterable.analyse_types(env).coerce_to(PyrexTypes.c_array_type(item_type, len(iterable.args)), env)
return self._transform_carray_iteration(node, iterable, reversed=reversed)
if iterable.type is Builtin.bytes_type:
return self._transform_bytes_iteration(node, iterable, reversed=reversed)
if iterable.type is Builtin.unicode_type:
return self._transform_unicode_iteration(node, iterable, reversed=reversed)
# in principle _transform_indexable_iteration would work on most of the above, and
# also tuple and list. However, it probably isn't quite as optimized
if iterable.type is Builtin.bytearray_type:
return self._transform_indexable_iteration(node, iterable, is_mutable=True, reversed=reversed)
if isinstance(iterable, ExprNodes.CoerceToPyTypeNode) and iterable.arg.type.is_memoryviewslice:
return self._transform_indexable_iteration(node, iterable.arg, is_mutable=False, reversed=reversed)
# the rest is based on function calls
if not isinstance(iterable, ExprNodes.SimpleCallNode):
return node
if iterable.args is None:
arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
else:
arg_count = len(iterable.args)
if arg_count and iterable.self is not None:
arg_count -= 1
function = iterable.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
base_obj = iterable.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
if not is_safe_iter and method in ('keys', 'values', 'items'):
# try to reduce this to the corresponding .iter*() methods
if isinstance(base_obj, ExprNodes.CallNode):
inner_function = base_obj.function
if (inner_function.is_name and inner_function.name == 'dict'
and inner_function.entry
and inner_function.entry.is_builtin):
# e.g. dict(something).items() => safe to use .iter*()
is_safe_iter = True
keys = values = False
if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
keys = True
elif method == 'itervalues' or (is_safe_iter and method == 'values'):
values = True
elif method == 'iteritems' or (is_safe_iter and method == 'items'):
keys = values = True
if keys or values:
return self._transform_dict_iteration(
node, base_obj, method, keys, values)
# enumerate/reversed ?
if iterable.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_enumerate_iteration(node, iterable)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_reversed_iteration(node, iterable)
# range() iteration?
if Options.convert_range and 1 <= arg_count <= 3 and (
iterable.self is None and
function.is_name and function.name in ('range', 'xrange') and
function.entry and function.entry.is_builtin):
if node.target.type.is_int or node.target.type.is_enum:
return self._transform_range_iteration(node, iterable, reversed=reversed)
if node.target.type.is_pyobject:
# Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
if isinstance(arg, ExprNodes.IntNode):
if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
continue
break
else:
return self._transform_range_iteration(node, iterable, reversed=reversed)
return node
def _transform_reversed_iteration(self, node, reversed_function):
args = reversed_function.arg_tuple.args
if len(args) == 0:
error(reversed_function.pos,
"reversed() requires an iterable argument")
return node
elif len(args) > 1:
error(reversed_function.pos,
"reversed() takes exactly 1 argument")
return node
arg = args[0]
# reversed(list/tuple) ?
if arg.type in (Builtin.tuple_type, Builtin.list_type):
node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
node.iterator.reversed = True
return node
return self._optimise_for_loop(node, arg, reversed=True)
def _transform_indexable_iteration(self, node, slice_node, is_mutable, reversed=False):
"""Iteration over iterables that Cython has a len() for and knows how to index.
"""
# Generates code that looks approximately like:
#
# done = False
# index = -1
# while not done:
# index += 1
# with critical_section(iterable):
# if index > len(iterable):
# done = True
# continue
# value = iterable[index]
# ...
# else:
# ...
#
# with small adjustments for reverse iteration and non-mutable sequences.
temp_nodes = []
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"),
may_hold_none=False,
is_temp=True,
)
temp_nodes.append(unpack_temp_node)
length_call_node = ExprNodes.SimpleCallNode(
node.pos,
function=ExprNodes.NameNode(
node.pos, name="len",
entry=Builtin.builtin_scope.lookup("len"),
),
args=[unpack_temp_node],
)
if is_mutable:
end_node = length_call_node
else:
end_node = UtilNodes.LetRefNode(length_call_node, type=PyrexTypes.c_py_ssize_t_type)
temp_nodes.append(end_node)
keep_going_ref = UtilNodes.LetRefNode(ExprNodes.BoolNode(node.pos, value=True))
temp_nodes.append(keep_going_ref)
if reversed:
start_node = end_node
end_node = ExprNodes.IntNode.for_size(node.pos, 0)
relation1, relation2 = '>', '>='
else:
start_node = ExprNodes.IntNode.for_size(node.pos, -1)
relation1, relation2 = '<=', '<'
start_check_node = copy.copy(start_node)
counter_ref = UtilNodes.LetRefNode(start_node, type=PyrexTypes.c_py_ssize_t_type)
temp_nodes.append(counter_ref)
test_node = ExprNodes.PrimaryCmpNode(
node.pos,
operator=relation2,
operand1=counter_ref,
operand2=end_node,
)
if is_mutable and reversed:
test_node = ExprNodes.BoolBinopNode(
node.pos,
operator="and",
operand1=test_node,
operand2=ExprNodes.PrimaryCmpNode(
node.pos,
operator=relation1,
operand1=start_check_node,
operand2=counter_ref,
)
)
failed_test_body = Nodes.StatListNode(
node.pos,
stats=[
# set "done" to true and continue. This'll terminate the loop and trigger the else clause
Nodes.SingleAssignmentNode(
node.pos,
lhs=keep_going_ref,
rhs=ExprNodes.BoolNode(node.pos, value=False),
),
Nodes.ContinueStatNode(node.pos),
]
)
target_value = ExprNodes.IndexNode(slice_node.pos, base=unpack_temp_node, index=counter_ref)
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value,
)
# analyse with boundscheck and wraparound
# off (because we're confident we know the size)
env = self.current_env()
new_directives = Options.copy_inherited_directives(env.directives, boundscheck=False, wraparound=False)
target_assign = Nodes.CompilerDirectivesNode(
target_assign.pos,
directives=new_directives,
body=target_assign,
)
length_check_and_target_assign = Nodes.IfStatNode(
node.pos,
if_clauses=[
Nodes.IfClauseNode(
node.pos,
condition=test_node,
body=target_assign,
),
],
else_clause=failed_test_body,
)
if is_mutable:
assert slice_node.type.is_pyobject, slice_node.type
# For mutable containers, the size can change underneath us.
# In freethreaded builds we need to lock around the length check and the indexing.
length_check_and_target_assign = Nodes.CriticalSectionStatNode(
node.pos,
args=[unpack_temp_node],
body=length_check_and_target_assign,
)
length_check_and_target_assign.analyse_declarations(env) # sets up "finally_except_clause"
body = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
node.pos,
lhs=counter_ref,
rhs=ExprNodes.binop_node(
node.pos,
operator="-" if reversed else "+",
inplace=True,
operand1=counter_ref,
operand2=ExprNodes.IntNode.for_size(node.pos, 1),
)
),
length_check_and_target_assign,
# exclude node.body for now to not reanalyse it
])
loop_node = Nodes.WhileStatNode(
node.pos,
condition = keep_going_ref,
body = body,
else_clause = node.else_clause,
)
ret = loop_node
# Initialise the temps that are assigned once on entry to the loop.
for let_ref_node in temp_nodes[::-1]:
ret = UtilNodes.LetNode(let_ref_node, ret)
ret = ret.analyse_expressions(env)
# Reinsert the original loop body after analysing the rest.
body.stats.append(node.body)
return ret
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
], exception_value="NULL")
PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
],
exception_value=-1)
def _transform_bytes_iteration(self, node, slice_node, reversed=False):
target_type = node.target.type
if not target_type.is_int and target_type is not Builtin.bytes_type:
# bytes iteration returns bytes objects in Py2, but
# integers in Py3
return node
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
slice_base_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_PyBytes_AsWritableString",
self.PyBytes_AS_STRING_func_type,
args = [unpack_temp_node],
is_temp = 1,
# TypeConversions utility code is always included
)
len_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_PyBytes_GET_SIZE",
self.PyBytes_GET_SIZE_func_type,
args = [unpack_temp_node],
is_temp = 1,
)
return UtilNodes.LetNode(
unpack_temp_node,
self._transform_carray_iteration(
node,
ExprNodes.SliceIndexNode(
slice_node.pos,
base = slice_base_node,
start = None,
step = None,
stop = len_node,
type = slice_base_node.type,
is_temp = 1,
),
reversed = reversed))
PyUnicode_READ_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
])
init_unicode_iteration_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
],
exception_value=-1)
def _transform_unicode_iteration(self, node, slice_node, reversed=False):
if slice_node.is_literal:
# try to reduce to byte iteration for plain Latin-1 strings
try:
bytes_value = bytes_literal(slice_node.value.encode('latin1'), 'iso8859-1')
except UnicodeEncodeError:
pass
else:
bytes_slice = ExprNodes.SliceIndexNode(
slice_node.pos,
base=ExprNodes.BytesNode(
slice_node.pos, value=bytes_value,
constant_result=bytes_value,
type=PyrexTypes.c_const_char_ptr_type).coerce_to(
PyrexTypes.c_const_uchar_ptr_type, self.current_env()),
start=None,
stop=ExprNodes.IntNode.for_size(slice_node.pos, len(bytes_value)),
type=Builtin.unicode_type, # hint for Python conversion
)
return self._transform_carray_iteration(node, bytes_slice, reversed)
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
start_node = ExprNodes.IntNode.for_size(node.pos, 0)
length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
end_node = length_temp.ref(node.pos)
if reversed:
relation1, relation2 = '>', '>='
start_node, end_node = end_node, start_node
else:
relation1, relation2 = '<=', '<'
kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
target_value = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_PyUnicode_READ",
self.PyUnicode_READ_func_type,
args = [kind_temp.ref(slice_node.pos),
data_temp.ref(slice_node.pos),
counter_temp.ref(node.target.pos)],
is_temp = False,
)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
loop_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_node, relation1=relation1,
target=counter_temp.ref(node.target.pos),
relation2=relation2, bound2=end_node,
step=None, body=body,
else_clause=node.else_clause,
from_range=True)
setup_node = Nodes.ExprStatNode(
node.pos,
expr = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_init_unicode_iteration",
self.init_unicode_iteration_func_type,
args = [unpack_temp_node,
ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
type=PyrexTypes.c_py_ssize_t_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
type=PyrexTypes.c_void_ptr_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
type=PyrexTypes.c_int_ptr_type),
],
is_temp = True,
result_is_used = False,
utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
))
return UtilNodes.LetNode(
unpack_temp_node,
UtilNodes.TempsBlockNode(
node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
def _transform_carray_iteration(self, node, slice_node, reversed=False):
neg_step = False
if isinstance(slice_node, ExprNodes.SliceIndexNode):
slice_base = slice_node.base
start = filter_none_node(slice_node.start)
stop = filter_none_node(slice_node.stop)
step = None
if not stop:
if not slice_base.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
elif slice_node.is_subscript:
assert isinstance(slice_node.index, ExprNodes.SliceNode)
slice_base = slice_node.base
index = slice_node.index
start = filter_none_node(index.start)
stop = filter_none_node(index.stop)
step = filter_none_node(index.step)
if step:
if not isinstance(step.constant_result, int) \
or step.constant_result == 0 \
or step.constant_result > 0 and not stop \
or step.constant_result < 0 and not start:
if not slice_base.type.is_pyobject:
error(step.pos, "C array iteration requires known step size and end index")
return node
else:
# step sign is handled internally by ForFromStatNode
step_value = step.constant_result
if reversed:
step_value = -step_value
neg_step = step_value < 0
step = ExprNodes.IntNode.for_size(step.pos, abs(step_value))
elif slice_node.type.is_array:
if slice_node.type.size is None:
error(slice_node.pos, "C array iteration requires known end index")
return node
slice_base = slice_node
start = step = None
stop = ExprNodes.IntNode.for_size(slice_node.pos, slice_node.type.size)
else:
if not slice_node.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
if start:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop is None:
if neg_step:
stop = ExprNodes.IntNode.for_size(slice_node.pos, -1)
else:
error(slice_node.pos, "C array iteration requires known step size and end index")
return node
if reversed:
if not start:
start = ExprNodes.IntNode.for_size(slice_node.pos, 0)
# if step was provided, it was already negated above
start, stop = stop, start
ptr_type = slice_base.type
if ptr_type.is_array:
ptr_type = ptr_type.element_ptr_type()
carray_ptr = slice_base.coerce_to_simple(self.current_env())
if start and start.constant_result != 0:
start_ptr_node = ExprNodes.AddNode(
start.pos,
operand1=carray_ptr,
operator='+',
operand2=start,
type=ptr_type)
else:
start_ptr_node = carray_ptr
if stop and stop.constant_result != 0:
stop_ptr_node = ExprNodes.AddNode(
stop.pos,
operand1=ExprNodes.CloneNode(carray_ptr),
operator='+',
operand2=stop,
type=ptr_type
).coerce_to_simple(self.current_env())
else:
stop_ptr_node = ExprNodes.CloneNode(carray_ptr)
counter = UtilNodes.TempHandle(ptr_type)
counter_temp = counter.ref(node.target.pos)
if slice_base.type.is_string and node.target.type.is_pyobject:
# special case: char* -> bytes/unicode
if slice_node.type is Builtin.unicode_type:
target_value = ExprNodes.CastNode(
ExprNodes.DereferenceNode(
node.target.pos, operand=counter_temp,
type=ptr_type.base_type),
PyrexTypes.c_py_ucs4_type).coerce_to(
node.target.type, self.current_env())
else:
# char* -> bytes coercion requires slicing, not indexing
target_value = ExprNodes.SliceIndexNode(
node.target.pos,
start=ExprNodes.IntNode.for_int(node.target.pos, 0),
stop=ExprNodes.IntNode.for_int(node.target.pos, 1),
base=counter_temp,
type=Builtin.bytes_type,
is_temp=1)
elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
# Allow iteration with pointer target to avoid copy.
target_value = counter_temp
else:
# TODO: can this safely be replaced with DereferenceNode() as above?
target_value = ExprNodes.IndexNode(
node.target.pos,
index=ExprNodes.IntNode.for_int(node.target.pos, 0),
base=counter_temp,
type=ptr_type.base_type)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)
for_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_ptr_node, relation1=relation1,
target=counter_temp,
relation2=relation2, bound2=stop_ptr_node,
step=step, body=body,
else_clause=node.else_clause,
from_range=True)
return UtilNodes.TempsBlockNode(
node.pos, temps=[counter],
body=for_node)
def _transform_enumerate_iteration(self, node, enumerate_function):
args = enumerate_function.arg_tuple.args
if len(args) == 0:
error(enumerate_function.pos,
"enumerate() requires an iterable argument")
return node
elif len(args) > 2:
error(enumerate_function.pos,
"enumerate() takes at most 2 arguments")
return node
if not node.target.is_sequence_constructor:
# leave this untouched for now
return node
targets = node.target.args
if len(targets) != 2:
# leave this untouched for now
return node
enumerate_target, iterable_target = targets
counter_type = enumerate_target.type
if not counter_type.is_pyobject and not counter_type.is_int:
# nothing we can do here, I guess
return node
if len(args) == 2:
start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
else:
start = ExprNodes.IntNode.for_int(enumerate_function.pos, 0, type=counter_type)
temp = UtilNodes.LetRefNode(start)
inc_expression = ExprNodes.AddNode(
enumerate_function.pos,
operand1 = temp,
operand2 = ExprNodes.IntNode.for_int(node.pos, 1, type=counter_type),
operator = '+',
type = counter_type,
#inplace = True, # not worth using in-place operation for Py ints
is_temp = counter_type.is_pyobject
)
loop_body = [
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = enumerate_target,
rhs = temp),
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = temp,
rhs = inc_expression)
]
if isinstance(node.body, Nodes.StatListNode):
node.body.stats = loop_body + node.body.stats
else:
loop_body.append(node.body)
node.body = Nodes.StatListNode(
node.body.pos,
stats = loop_body)
node.target = iterable_target
node.item = node.item.coerce_to(iterable_target.type, self.current_env())
node.iterator.sequence = args[0]
# recurse into loop to check for further optimisations
return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
def _find_for_from_node_relations(self, neg_step_value, reversed):
if reversed:
if neg_step_value:
return '<', '<='
else:
return '>', '>='
else:
if neg_step_value:
return '>=', '>'
else:
return '<=', '<'
def _transform_range_iteration(self, node, range_function, reversed=False):
args = range_function.arg_tuple.args
if len(args) < 3:
step_pos = range_function.pos
step_value = 1
step = ExprNodes.IntNode.for_size(step_pos, 1)
else:
step = args[2]
step_pos = step.pos
if not isinstance(step.constant_result, int):
# cannot determine step direction
return node
step_value = step.constant_result
if step_value == 0:
# will lead to an error elsewhere
return node
step = ExprNodes.IntNode.for_size(step_pos, step_value)
if len(args) == 1:
bound1 = ExprNodes.IntNode.for_size(range_function.pos, 0)
bound2 = args[0].coerce_to_index(self.current_env())
else:
bound1 = args[0].coerce_to_index(self.current_env())
bound2 = args[1].coerce_to_index(self.current_env())
relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)
bound2_ref_node = None
if reversed:
bound1, bound2 = bound2, bound1
abs_step = abs(step_value)
if abs_step != 1:
if (isinstance(bound1.constant_result, int) and
isinstance(bound2.constant_result, int)):
# calculate final bounds now
if step_value < 0:
begin_value = bound2.constant_result
end_value = bound1.constant_result
bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1
else:
begin_value = bound1.constant_result
end_value = bound2.constant_result
bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1
bound1 = ExprNodes.IntNode.for_int(
bound1.pos, bound1_value,
type=PyrexTypes.spanning_type(bound1.type, bound2.type))
else:
# evaluate the same expression as above at runtime
bound2_ref_node = UtilNodes.LetRefNode(bound2)
bound1 = self._build_range_step_calculation(
bound1, bound2_ref_node, step, step_value)
if step_value < 0:
step_value = -step_value
step.value = str(step_value)
step.constant_result = step_value
step = step.coerce_to_index(self.current_env())
if not bound2.is_literal:
# stop bound must be immutable => keep it in a temp var
bound2_is_temp = True
bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2)
else:
bound2_is_temp = False
for_node = Nodes.ForFromStatNode(
node.pos,
target=node.target,
bound1=bound1, relation1=relation1,
relation2=relation2, bound2=bound2,
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
for_node.set_up_loop(self.current_env())
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
return for_node
def _build_range_step_calculation(self, bound1, bound2_ref_node, step, step_value):
abs_step = abs(step_value)
spanning_type = PyrexTypes.spanning_type(bound1.type, bound2_ref_node.type)
if step.type.is_int and abs_step < 0x7FFF:
# Avoid loss of integer precision warnings.
spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type)
else:
spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type)
if step_value < 0:
begin_value = bound2_ref_node
end_value = bound1
final_op = '-'
else:
begin_value = bound1
end_value = bound2_ref_node
final_op = '+'
step_calculation_node = ExprNodes.binop_node(
bound1.pos,
operand1=ExprNodes.binop_node(
bound1.pos,
operand1=bound2_ref_node,
operator=final_op, # +/-
operand2=ExprNodes.MulNode(
bound1.pos,
operand1=ExprNodes.IntNode.for_int(bound1.pos, abs_step, type=spanning_step_type),
operator='*',
operand2=ExprNodes.DivNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=begin_value,
operator='-',
operand2=end_value,
type=spanning_type),
operator='-',
operand2=ExprNodes.IntNode.for_int(bound1.pos, 1),
type=spanning_step_type),
operator='//',
operand2=ExprNodes.IntNode.for_int(bound1.pos, abs_step, type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
operator=final_op, # +/-
operand2=ExprNodes.IntNode.for_int(bound1.pos, 1),
type=spanning_type)
return step_calculation_node
def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
temps = []
temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
temps.append(temp)
dict_temp = temp.ref(dict_obj.pos)
temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(temp)
pos_temp = temp.ref(node.pos)
key_target = value_target = tuple_target = None
if keys and values:
if node.target.is_sequence_constructor:
if len(node.target.args) == 2:
key_target, value_target = node.target.args
else:
# unusual case that may or may not lead to an error
return node
else:
tuple_target = node.target
elif keys:
key_target = node.target
else:
value_target = node.target
if isinstance(node.body, Nodes.StatListNode):
body = node.body
else:
body = Nodes.StatListNode(pos = node.body.pos,
stats = [node.body])
# keep original length to guard against dict modification
dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(dict_len_temp)
dict_len_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=dict_len_temp.ref(dict_obj.pos),
type=PyrexTypes.c_ptr_type(dict_len_temp.type))
temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
temps.append(temp)
is_dict_temp = temp.ref(node.pos)
is_dict_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=is_dict_temp,
type=PyrexTypes.c_ptr_type(temp.type))
iter_next_node = Nodes.DictIterationNextNode(
dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
key_target, value_target, tuple_target,
is_dict_temp)
iter_next_node = iter_next_node.analyse_expressions(self.current_env())
body.stats[0:0] = [iter_next_node]
if method:
method_node = ExprNodes.IdentifierStringNode(dict_obj.pos, value=method)
dict_obj = dict_obj.as_none_safe_node(
"'NoneType' object has no attribute '%{}s'".format('.30' if len(method) <= 30 else ''),
error = "PyExc_AttributeError",
format_args = [method])
else:
method_node = ExprNodes.NullNode(dict_obj.pos)
dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")
is_dict = ExprNodes.IntNode.for_int(node.pos, int(dict_obj.type is Builtin.dict_type))
result_code = [
Nodes.SingleAssignmentNode(
node.pos,
lhs = pos_temp,
rhs = ExprNodes.IntNode.for_size(node.pos, 0),
),
Nodes.SingleAssignmentNode(
dict_obj.pos,
lhs = dict_temp,
rhs = ExprNodes.PythonCapiCallNode(
dict_obj.pos,
"__Pyx_dict_iterator",
self.PyDict_Iterator_func_type,
utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
args = [dict_obj, is_dict, method_node, dict_len_temp_addr, is_dict_temp_addr],
is_temp=True,
)),
Nodes.WhileStatNode(
node.pos,
condition = None,
body = body,
else_clause = node.else_clause
)
]
return UtilNodes.TempsBlockNode(
node.pos, temps=temps,
body=Nodes.StatListNode(
node.pos,
stats = result_code
))
PyDict_Iterator_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
PySet_Iterator_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
])
def _transform_set_iteration(self, node, set_obj):
temps = []
temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
temps.append(temp)
set_temp = temp.ref(set_obj.pos)
temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(temp)
pos_temp = temp.ref(node.pos)
if isinstance(node.body, Nodes.StatListNode):
body = node.body
else:
body = Nodes.StatListNode(pos = node.body.pos,
stats = [node.body])
# keep original length to guard against set modification
set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(set_len_temp)
set_len_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=set_len_temp.ref(set_obj.pos),
type=PyrexTypes.c_ptr_type(set_len_temp.type))
temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
temps.append(temp)
is_set_temp = temp.ref(node.pos)
is_set_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=is_set_temp,
type=PyrexTypes.c_ptr_type(temp.type))
value_target = node.target
iter_next_node = Nodes.SetIterationNextNode(
set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
iter_next_node = iter_next_node.analyse_expressions(self.current_env())
body.stats[0:0] = [iter_next_node]
is_set = ExprNodes.IntNode.for_int(node.pos, int(set_obj.type is Builtin.set_type))
result_code = [
Nodes.SingleAssignmentNode(
node.pos,
lhs=pos_temp,
rhs=ExprNodes.IntNode.for_size(node.pos, 0),
),
Nodes.SingleAssignmentNode(
set_obj.pos,
lhs=set_temp,
rhs=ExprNodes.PythonCapiCallNode(
set_obj.pos,
"__Pyx_set_iterator",
self.PySet_Iterator_func_type,
utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
args=[set_obj, is_set, set_len_temp_addr, is_set_temp_addr],
is_temp=True,
)),
Nodes.WhileStatNode(
node.pos,
condition=None,
body=body,
else_clause=node.else_clause,
)
]
return UtilNodes.TempsBlockNode(
node.pos, temps=temps,
body=Nodes.StatListNode(
node.pos,
stats = result_code
))
| IterationTransform |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/items/GLScatterPlotItem.py | {
"start": 355,
"end": 459
} | class ____(enum.Flag):
POSITION = enum.auto()
COLOR = enum.auto()
SIZE = enum.auto()
| DirtyFlag |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/regression.py | {
"start": 7849,
"end": 9136
} | class ____(Regression):
"""Similar to regular ridge regression except that the data is transformed to allow
for polynomial regression.
Parameters:
-----------
degree: int
The degree of the polynomial that the independent variable X will be transformed to.
reg_factor: float
The factor that will determine the amount of regularization and feature
shrinkage.
n_iterations: float
The number of training iterations the algorithm will tune the weights for.
learning_rate: float
The step length that will be used when updating the weights.
"""
def __init__(self, degree, reg_factor, n_iterations=3000, learning_rate=0.01, gradient_descent=True):
self.degree = degree
self.regularization = l2_regularization(alpha=reg_factor)
super(PolynomialRidgeRegression, self).__init__(n_iterations,
learning_rate)
def fit(self, X, y):
X = normalize(polynomial_features(X, degree=self.degree))
super(PolynomialRidgeRegression, self).fit(X, y)
def predict(self, X):
X = normalize(polynomial_features(X, degree=self.degree))
return super(PolynomialRidgeRegression, self).predict(X)
| PolynomialRidgeRegression |
python | google__pytype | pytype/constant_folding.py | {
"start": 2775,
"end": 3020
} | class ____:
"""A folded python constant."""
typ: tuple[str, Any]
value: Any
elements: Any
op: opcodes.Opcode
@property
def tag(self):
return self.typ[0]
def __repr__(self):
return repr(self.value)
@attrs.define
| _Constant |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/communicator_objects/unity_to_external_pb2_grpc.py | {
"start": 2044,
"end": 2922
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Exchange(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/communicator_objects.UnityToExternalProto/Exchange',
mlagents__envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessageProto.SerializeToString,
mlagents__envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessageProto.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| UnityToExternalProto |
python | keon__algorithms | tests/test_map.py | {
"start": 4668,
"end": 4910
} | class ____(unittest.TestCase):
def test_is_isomorphic(self):
self.assertTrue(is_isomorphic("egg", "add"))
self.assertFalse(is_isomorphic("foo", "bar"))
self.assertTrue(is_isomorphic("paper", "title"))
| TestIsSomorphic |
python | pypa__setuptools | setuptools/_distutils/compilers/C/errors.py | {
"start": 0,
"end": 72
} | class ____(Exception):
"""Some compile/link operation failed."""
| Error |
python | pypa__pip | src/pip/_internal/models/link.py | {
"start": 944,
"end": 3012
} | class ____:
"""Links to content may have embedded hash values. This class parses those.
`name` must be any member of `_SUPPORTED_HASHES`.
This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to
be JSON-serializable to conform to PEP 610, this class contains the logic for
parsing a hash name and value for correctness, and then checking whether that hash
conforms to a schema with `.is_hash_allowed()`."""
name: str
value: str
_hash_url_fragment_re = re.compile(
# NB: we do not validate that the second group (.*) is a valid hex
# digest. Instead, we simply keep that string in this class, and then check it
# against Hashes when hash-checking is needed. This is easier to debug than
# proactively discarding an invalid hex digest, as we handle incorrect hashes
# and malformed hashes in the same place.
r"[#&]({choices})=([^&]*)".format(
choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)
),
)
def __post_init__(self) -> None:
assert self.name in _SUPPORTED_HASHES
@classmethod
@functools.cache
def find_hash_url_fragment(cls, url: str) -> LinkHash | None:
"""Search a string for a checksum algorithm name and encoded output value."""
match = cls._hash_url_fragment_re.search(url)
if match is None:
return None
name, value = match.groups()
return cls(name=name, value=value)
def as_dict(self) -> dict[str, str]:
return {self.name: self.value}
def as_hashes(self) -> Hashes:
"""Return a Hashes instance which checks only for the current hash."""
return Hashes({self.name: [self.value]})
def is_hash_allowed(self, hashes: Hashes | None) -> bool:
"""
Return True if the current hash is allowed by `hashes`.
"""
if hashes is None:
return False
return hashes.is_hash_allowed(self.name, hex_digest=self.value)
@dataclass(frozen=True)
| LinkHash |
python | kamyu104__LeetCode-Solutions | Python/total-characters-in-string-after-transformations-i.py | {
"start": 247,
"end": 545
} | class ____(object):
def lengthAfterTransformations(self, s, t):
"""
:type s: str
:type t: int
:rtype: int
"""
return reduce(lambda accu, x: (accu+x)%MOD, (DP[((ord(x)-ord('a'))+t)] for x in s), 0)
# Time: O(n + t + 26)
# Space: O(26)
# dp
| Solution |
python | cython__cython | tests/run/test_genericclass.py | {
"start": 128,
"end": 4991
} | class ____(unittest.TestCase):
def test_mro_entry_signature(self):
tested = []
class B: ...
class C:
def __mro_entries__(self, *args, **kwargs):
tested.extend([args, kwargs])
return (C,)
c = C()
self.assertEqual(tested, [])
class D(B, c): ...
self.assertEqual(tested[0], ((B, c),))
self.assertEqual(tested[1], {})
def test_mro_entry(self):
tested = []
class A: ...
class B: ...
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return (self.__class__,)
c = C()
self.assertEqual(tested, [])
class D(A, c, B): ...
self.assertEqual(tested[-1], (A, c, B))
self.assertEqual(D.__bases__, (A, C, B))
self.assertEqual(D.__orig_bases__, (A, c, B))
self.assertEqual(D.__mro__, (D, A, C, B, object))
d = D()
class E(d): ...
self.assertEqual(tested[-1], (d,))
self.assertEqual(E.__bases__, (D,))
def test_mro_entry_none(self):
tested = []
class A: ...
class B: ...
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return ()
c = C()
self.assertEqual(tested, [])
class D(A, c, B): ...
self.assertEqual(tested[-1], (A, c, B))
self.assertEqual(D.__bases__, (A, B))
self.assertEqual(D.__orig_bases__, (A, c, B))
self.assertEqual(D.__mro__, (D, A, B, object))
class E(c): ...
self.assertEqual(tested[-1], (c,))
self.assertEqual(E.__bases__, (object,))
self.assertEqual(E.__orig_bases__, (c,))
self.assertEqual(E.__mro__, (E, object))
def test_mro_entry_with_builtins(self):
tested = []
class A: ...
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return (dict,)
c = C()
self.assertEqual(tested, [])
class D(A, c): ...
self.assertEqual(tested[-1], (A, c))
self.assertEqual(D.__bases__, (A, dict))
self.assertEqual(D.__orig_bases__, (A, c))
self.assertEqual(D.__mro__, (D, A, dict, object))
def test_mro_entry_with_builtins_2(self):
tested = []
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return (C,)
c = C()
self.assertEqual(tested, [])
class D(c, dict): ...
self.assertEqual(tested[-1], (c, dict))
self.assertEqual(D.__bases__, (C, dict))
self.assertEqual(D.__orig_bases__, (c, dict))
self.assertEqual(D.__mro__, (D, C, dict, object))
def test_mro_entry_errors(self):
class C_too_many:
def __mro_entries__(self, bases, something, other):
return ()
c = C_too_many()
with self.assertRaises(TypeError):
class D(c): ...
class C_too_few:
def __mro_entries__(self):
return ()
d = C_too_few()
with self.assertRaises(TypeError):
class D(d): ...
def test_mro_entry_errors_2(self):
class C_not_callable:
__mro_entries__ = "Surprise!"
c = C_not_callable()
with self.assertRaises(TypeError):
class D(c): ...
class C_not_tuple:
def __mro_entries__(self):
return object
c = C_not_tuple()
with self.assertRaises(TypeError):
class D(c): ...
def test_mro_entry_metaclass(self):
meta_args = []
class Meta(type):
def __new__(mcls, name, bases, ns):
meta_args.extend([mcls, name, bases, ns])
return super().__new__(mcls, name, bases, ns)
class A: ...
class C:
def __mro_entries__(self, bases):
return (A,)
c = C()
class D(c, metaclass=Meta):
x = 1
self.assertEqual(meta_args[0], Meta)
self.assertEqual(meta_args[1], 'D')
self.assertEqual(meta_args[2], (A,))
self.assertEqual(meta_args[3]['x'], 1)
self.assertEqual(D.__bases__, (A,))
self.assertEqual(D.__orig_bases__, (c,))
self.assertEqual(D.__mro__, (D, A, object))
self.assertEqual(D.__class__, Meta)
def test_mro_entry_type_call(self):
# Substitution should _not_ happen in direct type call
class C:
def __mro_entries__(self, bases):
return ()
c = C()
with self.assertRaisesRegex(TypeError,
"MRO entry resolution; "
"use types.new_class()"):
type('Bad', (c,), {})
| TestMROEntry |
python | xlwings__xlwings | xlwings/conversion/standard.py | {
"start": 7706,
"end": 8139
} | class ____(Converter):
@classmethod
def base_reader(cls, options):
return super(DictConverter, cls).base_reader(Options(options).override(ndim=2))
@classmethod
def read_value(cls, value, options):
assert not value or len(value[0]) == 2
return dict(value)
@classmethod
def write_value(cls, value, options):
return list(value.items())
DictConverter.register(dict)
| DictConverter |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/formatted_text/base.py | {
"start": 3886,
"end": 5086
} | class ____:
"""
Template for string interpolation with formatted text.
Example::
Template(' ... {} ... ').format(HTML(...))
:param text: Plain text.
"""
def __init__(self, text: str) -> None:
assert "{0}" not in text
self.text = text
def format(self, *values: AnyFormattedText) -> AnyFormattedText:
def get_result() -> AnyFormattedText:
# Split the template in parts.
parts = self.text.split("{}")
assert len(parts) - 1 == len(values)
result = FormattedText()
for part, val in zip(parts, values):
result.append(("", part))
result.extend(to_formatted_text(val))
result.append(("", parts[-1]))
return result
return get_result
def merge_formatted_text(items: Iterable[AnyFormattedText]) -> AnyFormattedText:
"""
Merge (Concatenate) several pieces of formatted text together.
"""
def _merge_formatted_text() -> AnyFormattedText:
result = FormattedText()
for i in items:
result.extend(to_formatted_text(i))
return result
return _merge_formatted_text
| Template |
python | pytorch__pytorch | torch/distributed/tensor/_op_schema.py | {
"start": 11145,
"end": 21037
} | class ____:
"""
OpSchema is a data class that describes an operator input schemas, it includes
DTensorSpecs/OpStrategies (instead of DTensor) and non-tensor args/kwargs (positional
order preserved). It is mainly used by the DTensor's dispatching logic to perform various
actions (i.e. sharding propagation, caching sharding decisions, redistribute, etc.)
NOTE: this must be used as a read only data class
TODO: make this a frozen dataclass
Args:
op: the operator overload we are intercepting
args_schema: contains args except that the DTensor args have been replaced
with its DTensorSpec or OpStrategy
kwargs_schema: contains kwargs except that the DTensor kwargs have been replaced
with its DTensorSpec or OpStrategy
"""
op: OpOverload
args_schema: ArgsType
kwargs_schema: KwargsType
schema_info: RuntimeSchemaInfo | None = None
_comparison_key: tuple[object, ...] | None = None
@property
def args_spec(self) -> tuple[DTensorSpec, ...]:
"""
args_spec: Tuple[DTensorSpec, ...]: contains a clean list of args spec list
with NO non-DTensor positional arguments (i.e. int/float/tuple, etc)
mainly used by sharding propagation to propagate the output spec
"""
args = (
tree_leaves(self.args_schema)
if self.schema_info is not None and self.schema_info.needs_pytree
else self.args_schema
)
return tuple(item for item in args if isinstance(item, DTensorSpec))
@property
def args_strategy(self) -> tuple[OpStrategy, ...]:
# filter out non-relevant values from args schema to get a clean OpStrategy list
# separate with args_spec for the ease of type annotation
# TODO: see if we should merge this with args_spec
args = (
tree_leaves(self.args_schema)
if self.schema_info is not None and self.schema_info.needs_pytree
else self.args_schema
)
return tuple(item for item in args if isinstance(item, OpStrategy))
@property
def kwargs_strategy(self) -> tuple[OpStrategy, ...]:
# returns OpStrategy items from kwargs_schema.
kwargs_vals = (
tree_leaves(self.kwargs_schema)
if self.schema_info is not None and self.schema_info.needs_pytree
else self.kwargs_schema.values()
)
return tuple(item for item in kwargs_vals if isinstance(item, OpStrategy))
def __repr__(self) -> str:
args_schema = ", ".join([str(arg_schema) for arg_schema in self.args_schema])
return (
f"OpSchema(op={self.op},"
f" args_schema=({args_schema}),"
f" kwargs_schema={self.kwargs_schema})"
)
def __str__(self) -> str:
args_schema: list[str] = []
device_mesh = None
for arg in self.args_schema:
if isinstance(arg, DTensorSpec):
args_schema.append(str(arg))
device_mesh = arg.mesh
elif isinstance(arg, OpStrategy):
assert len(arg.strategies) == 1
args_schema.append(_pretty_print_spec(arg.strategies[0].output_specs))
device_mesh = arg.mesh
elif isinstance(arg, TupleStrategy):
first_op_strategy = arg.children[0]
assert isinstance(first_op_strategy, OpStrategy)
device_mesh = first_op_strategy.mesh
args_schema.append(str(arg))
else:
args_schema.append(str(arg))
return f"{self.op}({', '.join(args_schema)}) on {device_mesh})"
def __post_init__(self) -> None:
_DTensor_OpSchema_post_init(self)
def arg_type_tensor_or_tensor_list_like(self, arg: object) -> bool:
is_tensor = isinstance(arg, DTensorSpec)
if is_tensor:
return True
if not isinstance(arg, list):
return False
return all(isinstance(e, DTensorSpec) or e is None for e in arg)
def return_type_tuple_tensor_like(self) -> bool:
# all dispatch ops could only return Tuple[Tensor] or have None/ints/floats
# in the tuple, but the first element must be a Tensor, so this check is enough
return_types = self.op._schema.returns
return len(return_types) > 1 and isinstance(
return_types[0].type, torch.TensorType
)
def return_type_list_tensor_like(self) -> bool:
# returns True if the return type is a List
return_types = self.op._schema.returns
return len(return_types) == 1 and isinstance(
return_types[0].type, torch.ListType
)
def return_type_tensor(self) -> bool:
return_types = self.op._schema.returns
# all dispatch ops only return Tensor or Tuple[Tensor] for tensor like
# return types, so this check is enough for tensor like types
return isinstance(return_types[0].type, torch.TensorType)
def get_mesh_from_args(self, validate: bool = True) -> DeviceMesh:
"""
This util can be used to get a mesh from the OpSchema that contains multiple
DTensors as arguments. When `validate` is True, it will try to validate that all the
arguments have the same mesh to avoid unexpected cross mesh errors.
NOTE: this util currently does not handle TupleStrategy when `validate=True`,
this is because for TupleStrategy there could be different types of checks, i.e.:
- for stack and cat like op, we need to check within a TupleStrategy is every
input is on the same mesh
- for foreach like ops we need to check "zipped" inputs are on the same mesh
for each index.
"""
first_arg = self.args_schema[0]
if isinstance(first_arg, (DTensorSpec, OpStrategy)):
mesh = first_arg.mesh
elif isinstance(first_arg, (list, tuple, TupleStrategy)):
first_elem = (
first_arg.children[0]
if isinstance(first_arg, TupleStrategy)
else first_arg[0]
)
assert isinstance(first_elem, (DTensorSpec, OpStrategy))
mesh = first_elem.mesh
else:
raise ValueError(f"Cannot find device mesh from args for op : {self.op}.")
if validate:
for arg in self.args_schema[1:]:
if isinstance(arg, (DTensorSpec, OpStrategy)) and arg.mesh != mesh:
raise RuntimeError(
f"DTensor does not support cross-mesh operation on {self.op}! "
f"Got meshes: {mesh} {arg.mesh}. "
f"Please make sure all the arguments have the same DeviceMesh."
)
return mesh
def is_inplace_op(self) -> bool:
# simple analysis of function schema to determine
# if this is an inplace variant, it might not
# be entirely correct, but it's good enough for now.
return self.op._schema.name[-1] == "_"
def is_out_variant_op(self) -> bool:
# simple analysis of function schema to determine
# if this is an out variant, it might not
# be entirely correct, but it's good enough for now.
return "out" in self.op._schema.overload_name
def is_view_op(self) -> bool:
return self.op._schema._is_view_op()
def _recompute_comparison_key(self) -> None:
_DTensor_OpSchema_recompute_comparison_key(self)
def __hash__(self) -> int:
return hash(self._comparison_key)
def __eq__(self, other: object) -> bool:
# early return checks
if not isinstance(other, OpSchema):
return False
if self.op != other.op:
return False
if len(self.args_schema) != len(other.args_schema):
return False
return self._comparison_key == other._comparison_key
def gen_fake_args(self) -> ArgsType:
"""
gen_fake_args: generate fake args for the operator, this is mainly used
by sharding propagation rules to generate fake args for the operator
to run the local tensor operator and get the output spec.
"""
return tree_map_only(
DTensorSpec,
_rebuild_tensor_from_dtensor_meta,
self.args_schema,
is_leaf=lambda x: isinstance(x, DTensorSpec),
)
def gen_fake_kwargs(self) -> KwargsType:
"""
gen_fake_kwargs: generate fake kwargs for the operator, this is mainly used
by sharding propagation rules to generate fake kwargs for the operator
to run the local tensor operator and get the output spec.
"""
return tree_map_only(
DTensorSpec,
_rebuild_tensor_from_dtensor_meta,
self.kwargs_schema,
is_leaf=lambda x: isinstance(x, DTensorSpec),
)
def _inplace_rewrap_schema_suggestion(self, origin_schema: "OpSchema") -> None:
suggestion_args_spec = self.args_spec
new_arg_schema: list[object] = []
idx_of_args_spec = 0
if (
origin_schema.schema_info is not None
and origin_schema.schema_info.needs_pytree
):
args_schema: Sequence[Any] = tree_leaves(origin_schema.args_schema)
else:
args_schema = origin_schema.args_schema
for arg in args_schema:
if isinstance(arg, DTensorSpec):
new_arg_schema.append(suggestion_args_spec[idx_of_args_spec])
idx_of_args_spec += 1
else:
new_arg_schema.append(arg)
self.args_schema = tuple(new_arg_schema)
self.kwargs_schema = origin_schema.kwargs_schema
self._recompute_comparison_key()
@dataclass
| OpSchema |
python | ray-project__ray | python/ray/autoscaler/v2/tests/test_reconciler.py | {
"start": 2742,
"end": 3680
} | class ____(IResourceScheduler):
def __init__(self, to_launch=None, to_terminate=None):
if to_launch is None:
to_launch = []
if to_terminate is None:
to_terminate = []
self.to_launch = to_launch
self.to_terminate = to_terminate
def schedule(self, req):
return SchedulingReply(
to_launch=self.to_launch,
to_terminate=self.to_terminate,
)
@pytest.fixture()
def setup():
instance_storage = InstanceStorage(
cluster_id="test_cluster_id",
storage=InMemoryStorage(),
)
mock_subscriber = MockSubscriber()
instance_manager = InstanceManager(
instance_storage=instance_storage,
instance_status_update_subscribers=[mock_subscriber],
)
cloud_resource_monitor = CloudResourceMonitor()
yield instance_manager, instance_storage, mock_subscriber, cloud_resource_monitor
| MockScheduler |
python | mlflow__mlflow | dev/clint/src/clint/index.py | {
"start": 965,
"end": 2125
} | class ____:
"""Lightweight function signature information for efficient serialization."""
has_vararg: bool # *args
has_kwarg: bool # **kwargs
args: list[str] = field(default_factory=list) # Regular arguments
kwonlyargs: list[str] = field(default_factory=list) # Keyword-only arguments
posonlyargs: list[str] = field(default_factory=list) # Positional-only arguments
@classmethod
def from_func_def(
cls, node: ast.FunctionDef | ast.AsyncFunctionDef, skip_self: bool = False
) -> Self:
"""Create FunctionInfo from an AST function definition node."""
args = node.args.args
if skip_self and args:
args = args[1:] # Skip 'self' for methods
return cls(
has_vararg=node.args.vararg is not None,
has_kwarg=node.args.kwarg is not None,
args=[arg.arg for arg in args],
kwonlyargs=[arg.arg for arg in node.args.kwonlyargs],
posonlyargs=[arg.arg for arg in node.args.posonlyargs],
)
@property
def all_args(self) -> list[str]:
return self.posonlyargs + self.args + self.kwonlyargs
| FunctionInfo |
python | numba__numba | numba/tests/test_polynomial.py | {
"start": 4481,
"end": 19776
} | class ____(MemoryLeakMixin, TestCase):
#
# tests for Polyutils functions
#
def test_trimseq_basic(self):
pyfunc = trimseq
cfunc = njit(trimseq)
def inputs():
for i in range(5):
yield np.array([1] + [0] * i)
for coefs in inputs():
self.assertPreciseEqual(pyfunc(coefs), cfunc(coefs))
def test_trimseq_exception(self):
cfunc = njit(trimseq)
self.disable_leak_check()
with self.assertRaises(TypingError) as raises:
cfunc("abc")
self.assertIn('The argument "seq" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as e:
cfunc(np.arange(10).reshape(5, 2))
self.assertIn('Coefficient array is not 1-d',
str(e.exception))
with self.assertRaises(TypingError) as e:
cfunc((1, 2, 3, 0))
self.assertIn('Unsupported type UniTuple(int64, 4) for argument "seq"',
str(e.exception))
def test_pu_as_series_basic(self):
pyfunc1 = polyasseries1
cfunc1 = njit(polyasseries1)
pyfunc2 = polyasseries2
cfunc2 = njit(polyasseries2)
def inputs():
yield np.arange(4)
yield np.arange(6).reshape((2,3))
yield (1, np.arange(3), np.arange(2, dtype=np.float32))
yield ([1, 2, 3, 4, 0], [1, 2, 3])
yield ((0, 0, 1e-3, 0, 1e-5, 0, 0), (1, 2, 3, 4, 5, 6, 7))
yield ((0, 0, 1e-3, 0, 1e-5, 0, 0), (1j, 2, 3j, 4j, 5, 6j, 7))
yield (2, [1.1, 0.])
yield ([1, 2, 3, 0], )
yield ((1, 2, 3, 0), )
yield (np.array([1, 2, 3, 0]), )
yield [np.array([1, 2, 3, 0]), np.array([1, 2, 3, 0])]
yield [np.array([1,2,3]), ]
for input in inputs():
self.assertPreciseEqual(pyfunc1(input), cfunc1(input))
self.assertPreciseEqual(pyfunc2(input, False), cfunc2(input, False))
self.assertPreciseEqual(pyfunc2(input, True), cfunc2(input, True))
def test_pu_as_series_exception(self):
cfunc1 = njit(polyasseries1)
cfunc2 = njit(polyasseries2)
self.disable_leak_check()
with self.assertRaises(TypingError) as raises:
cfunc1("abc")
self.assertIn('The argument "alist" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc2("abc", True)
self.assertIn('The argument "alist" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc2(np.arange(4), "abc")
self.assertIn('The argument "trim" must be boolean',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc1(([1, 2, 3], np.arange(16).reshape(4,4)))
self.assertIn('Coefficient array is not 1-d',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc1(np.arange(8).reshape((2, 2, 2)))
self.assertIn('Coefficient array is not 1-d',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc1([np.array([[1,2,3],[1,2,3]]), ])
self.assertIn('Coefficient array is not 1-d',
str(raises.exception))
with self.assertRaises(ValueError) as raises:
cfunc1(np.array([[]], dtype=np.float64))
self.assertIn('Coefficient array is empty',
str(raises.exception))
with self.assertRaises(ValueError) as raises:
cfunc1(([1, 2, 3], np.array([], dtype=np.float64),
np.array([1,2,1])))
self.assertIn('Coefficient array is empty',
str(raises.exception))
def _test_polyarithm_basic(self, pyfunc, ignore_sign_on_zero=False):
# test suite containing tests for polyadd, polysub, polymul, polydiv
cfunc = njit(pyfunc)
def inputs():
# basic, taken from https://github.com/numpy/numpy/blob/48a8277855849be094a5979c48d9f5f1778ee4de/numpy/polynomial/tests/test_polynomial.py#L58-L123 # noqa: E501
for i in range(5):
for j in range(5):
p1 = np.array([0] * i + [1])
p2 = np.array([0] * j + [1])
yield p1, p2
# test lists, tuples, scalars
yield [1, 2, 3], [1, 2, 3]
yield [1, 2, 3], (1, 2, 3)
yield (1, 2, 3), [1, 2, 3]
yield [1, 2, 3], 3
yield 3, (1, 2, 3)
# test different dtypes
yield np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])
yield np.array([1j, 2j, 3j]), np.array([1.0, 2.0, 3.0])
yield np.array([1, 2, 3]), np.array([1j, 2j, 3j])
yield (1, 2, 3), 3.0
yield (1, 2, 3), 3j
yield (1, 1e-3, 3), (1, 2, 3)
for p1, p2 in inputs():
self.assertPreciseEqual(pyfunc(p1,p2), cfunc(p1,p2),
ignore_sign_on_zero=ignore_sign_on_zero)
def _test_polyarithm_exception(self, pyfunc):
# test suite containing tests for polyadd, polysub, polymul, polydiv
cfunc = njit(pyfunc)
self.disable_leak_check()
with self.assertRaises(TypingError) as raises:
cfunc("abc", np.array([1,2,3]))
self.assertIn('The argument "c1" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(np.array([1,2,3]), "abc")
self.assertIn('The argument "c2" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as e:
cfunc(np.arange(10).reshape(5, 2), np.array([1, 2, 3]))
self.assertIn('Coefficient array is not 1-d',
str(e.exception))
with self.assertRaises(TypingError) as e:
cfunc(np.array([1, 2, 3]), np.arange(10).reshape(5, 2))
self.assertIn('Coefficient array is not 1-d',
str(e.exception))
def test_polyadd_basic(self):
self._test_polyarithm_basic(polyadd)
def test_polyadd_exception(self):
self._test_polyarithm_exception(polyadd)
def test_polysub_basic(self):
self._test_polyarithm_basic(polysub, ignore_sign_on_zero=True)
def test_polysub_exception(self):
self._test_polyarithm_exception(polysub)
def test_polymul_basic(self):
self._test_polyarithm_basic(polymul)
def test_polymul_exception(self):
self._test_polyarithm_exception(polymul)
def test_poly_polydiv_basic(self):
pyfunc = polydiv
cfunc = njit(polydiv)
self._test_polyarithm_basic(polydiv)
def inputs():
# Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L99-L114 # noqa: E501
# check scalar division
yield [2], [2]
yield [2, 2], [2]
# check rest.
for i in range(5):
for j in range(5):
ci = [0] * i + [1, 2]
cj = [0] * j + [1, 2]
tgt = poly.polyadd(ci, cj)
yield tgt, ci
yield np.array([1,0,0,0,0,0,-1]), np.array([1,0,0,-1])
for c1, c2 in inputs():
self.assertPreciseEqual(pyfunc(c1, c2), cfunc(c1, c2))
def test_poly_polydiv_exception(self):
self._test_polyarithm_exception(polydiv)
cfunc = njit(polydiv)
# Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L97 # noqa: E501
# check zero division
with self.assertRaises(ZeroDivisionError) as _:
cfunc([1], [0])
def test_poly_polyval_basic(self):
pyfunc2 = polyval2
cfunc2 = njit(polyval2)
pyfunc3T = polyval3T
cfunc3T = njit(polyval3T)
pyfunc3F = polyval3F
cfunc3F = njit(polyval3F)
def inputs():
# Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L137-L157 # noqa: E501
# check empty input
yield np.array([], dtype=np.float64), [1]
yield 1, [1,2,3]
yield np.arange(4).reshape(2,2), [1,2,3]
# check normal input
for i in range(5):
yield np.linspace(-1, 1), [0] * i + [1]
yield np.linspace(-1, 1), [0, -1, 0, 1]
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
yield x, [1]
yield x, [1, 0]
yield x, [1, 0, 0]
# Check that behaviour corresponds to tensor = False
yield np.array([1, 2]), np.arange(4).reshape(2,2)
yield [1, 2], np.arange(4).reshape(2,2)
for x, c in inputs():
self.assertPreciseEqual(pyfunc2(x, c), cfunc2(x, c))
# test tensor argument
self.assertPreciseEqual(pyfunc3T(x, c), cfunc3T(x, c))
self.assertPreciseEqual(pyfunc3F(x, c), cfunc3F(x, c))
def test_poly_polyval_exception(self):
cfunc2 = njit(polyval2)
cfunc3T = njit(polyval3T)
cfunc3F = njit(polyval3F)
self.disable_leak_check()
with self.assertRaises(TypingError) as raises:
cfunc2(3, "abc")
self.assertIn('The argument "c" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc2("abc", 3)
self.assertIn('The argument "x" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc2("abc", "def")
self.assertIn('The argument "x" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc3T(3, "abc")
self.assertIn('The argument "c" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc3T("abc", 3)
self.assertIn('The argument "x" must be array-like',
str(raises.exception))
@njit
def polyval3(x, c, tensor):
res = poly.polyval(x, c, tensor)
return res
with self.assertRaises(TypingError) as raises:
polyval3(3, 3, "abc")
self.assertIn('The argument "tensor" must be boolean',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc3F("abc", "def")
self.assertIn('The argument "x" must be array-like',
str(raises.exception))
def test_poly_polyint_basic(self):
pyfunc = polyint
cfunc = njit(polyint)
# basic
self.assertPreciseEqual(pyfunc([1,2,3]), cfunc([1,2,3]))
# Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L314-L381 # noqa: E501
# test integration of zero polynomial
for i in range(2, 5):
self.assertPreciseEqual(pyfunc([0], m=i), cfunc([0], m=i))
# check single integration with integration constant
for i in range(5):
pol = [0] * i + [1]
self.assertPreciseEqual(pyfunc(pol, m=1), pyfunc(pol, m=1))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
self.assertPreciseEqual(pyfunc(pol, m=j), cfunc(pol, m=j))
# test multidimensional arrays
c2 = np.array([[0,1], [0,2]])
self.assertPreciseEqual(pyfunc(c2), cfunc(c2))
c3 = np.arange(8).reshape((2,2,2))
self.assertPreciseEqual(pyfunc(c3), cfunc(c3))
def test_poly_polyint_exception(self):
cfunc = njit(polyint)
self.disable_leak_check()
with self.assertRaises(TypingError) as raises:
cfunc("abc")
self.assertIn('The argument "c" must be array-like',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(np.array([1,2,3]), "abc")
self.assertIn('The argument "m" must be an integer',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(['a', 'b', 'c'], 1)
self.assertIn('Input dtype must be scalar.',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(('a', 'b', 'c'), 1)
self.assertIn('Input dtype must be scalar.',
str(raises.exception))
#
# tests for Polynomial class
#
def test_Polynomial_constructor(self):
def pyfunc3(c, dom, win):
p = poly.Polynomial(c, dom, win)
return p
cfunc3 = njit(pyfunc3)
def pyfunc1(c):
p = poly.Polynomial(c)
return p
cfunc1 = njit(pyfunc1)
list1 = (np.array([0, 1]), np.array([0., 1.]))
list2 = (np.array([0, 1]), np.array([0., 1.]))
list3 = (np.array([0, 1]), np.array([0., 1.]))
for c in list1:
for dom in list2:
for win in list3:
p1 = pyfunc3(c, dom, win)
p2 = cfunc3(c, dom, win)
q1 = pyfunc1(c)
q2 = cfunc1(c)
self.assertPreciseEqual(p1, p2)
self.assertPreciseEqual(p1.coef, p2.coef)
self.assertPreciseEqual(p1.domain, p2.domain)
self.assertPreciseEqual(p1.window, p2.window)
self.assertPreciseEqual(q1.coef, q2.coef)
self.assertPreciseEqual(q1.domain, q2.domain)
self.assertPreciseEqual(q1.window, q2.window)
def test_Polynomial_exeption(self):
def pyfunc3(c, dom, win):
p = poly.Polynomial(c, dom, win)
return p
cfunc3 = njit(pyfunc3)
self.disable_leak_check()
input2 = np.array([1, 2])
input3 = np.array([1, 2, 3])
input2D = np.arange(4).reshape((2, 2))
with self.assertRaises(ValueError) as raises:
cfunc3(input2, input3, input2)
self.assertIn("Domain has wrong number of elements.",
str(raises.exception))
with self.assertRaises(ValueError) as raises:
cfunc3(input2, input2, input3)
self.assertIn("Window has wrong number of elements.",
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc3(input2D, input2, input2)
self.assertIn("Coefficient array is not 1-d",
str(raises.exception))
| TestPolynomial |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF033.py | {
"start": 785,
"end": 1018
} | class ____:
"""A very helpful docstring.
Docstrings are very important and totally not a waste of time.
"""
ping = "pong"
def __post_init__(self, bar: int = 11, baz: int = 12) -> None: ...
# RUF033
@dataclass
| Foo |
python | huggingface__transformers | src/transformers/models/parakeet/modeling_parakeet.py | {
"start": 21071,
"end": 24014
} | class ____(PreTrainedModel):
config: ParakeetCTCConfig
base_model_prefix = "model"
main_input_name = "input_features"
input_modalities = "audio"
supports_gradient_checkpointing = True
_no_split_modules = ["ParakeetEncoderBlock"]
_supports_flat_attention_mask = True
_supports_sdpa = True
_supports_flex_attn = True
# TODO: @eustlb, add support when flash attention supports custom attention bias
_supports_flash_attn = False
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": ParakeetEncoderBlock,
"attentions": ParakeetEncoderAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if hasattr(self.config, "initializer_range"):
std = self.config.initializer_range
else:
# 0.02 is the standard default value across the library
std = getattr(self.config.get_text_config(), "initializer_range", 0.02)
if isinstance(module, ParakeetEncoderAttention):
# Initialize positional bias parameters
init.normal_(module.bias_u, mean=0.0, std=std)
init.normal_(module.bias_v, mean=0.0, std=std)
def _get_subsampling_output_length(self, input_lengths: torch.Tensor):
encoder_config = self.config.encoder_config if isinstance(self.config, ParakeetCTCConfig) else self.config
kernel_size = encoder_config.subsampling_conv_kernel_size
stride = encoder_config.subsampling_conv_stride
num_layers = int(math.log2(encoder_config.subsampling_factor))
all_paddings = (kernel_size - 1) // 2 * 2
add_pad = all_paddings - kernel_size
lengths = input_lengths
for _ in range(num_layers):
lengths = torch.div(lengths.to(dtype=torch.float) + add_pad, stride) + 1.0
lengths = torch.floor(lengths)
return lengths.to(dtype=torch.int)
def _get_output_attention_mask(self, attention_mask: torch.Tensor, target_length: Optional[int] = None):
"""
Convert the input attention mask to its subsampled form. `target_length` sets the desired output length, useful
when the attention mask length differs from `sum(-1).max()` (i.e., when the longest sequence in the batch is padded)
"""
output_lengths = self._get_subsampling_output_length(attention_mask.sum(-1))
# Use target_length if provided, otherwise use max length in batch
max_length = target_length if target_length is not None else output_lengths.max()
attention_mask = torch.arange(max_length, device=attention_mask.device) < output_lengths[:, None]
return attention_mask
@auto_docstring(
custom_intro="""
The Parakeet Encoder model, based on the [Fast Conformer architecture](https://huggingface.co/papers/2305.05084).
"""
)
| ParakeetPreTrainedModel |
python | walkccc__LeetCode | solutions/2360. Longest Cycle in a Graph/2360.py | {
"start": 0,
"end": 494
} | class ____:
def longestCycle(self, edges: list[int]) -> int:
ans = -1
time = 1
timeVisited = [0] * len(edges)
for i, edge in enumerate(edges):
if timeVisited[i]:
continue
startTime = time
u = i
while u != -1 and not timeVisited[u]:
timeVisited[u] = time
time += 1
u = edges[u] # Move to the next node.
if u != -1 and timeVisited[u] >= startTime:
ans = max(ans, time - timeVisited[u])
return ans
| Solution |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 21035,
"end": 27702
} | class ____(unittest.TestCase):
def _callFUT(self, context, name):
from pyramid.traversal import find_resource
return find_resource(context, name)
def _registerTraverser(self, traverser):
from pyramid.threadlocal import get_current_registry
reg = get_current_registry()
from zope.interface import Interface
from pyramid.interfaces import ITraverser
reg.registerAdapter(traverser, (Interface,), ITraverser)
def test_list(self):
resource = DummyContext()
traverser = make_traverser({'context': resource, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, [''])
self.assertEqual(result, resource)
self.assertEqual(resource.request.environ['PATH_INFO'], '/')
def test_generator(self):
resource = DummyContext()
traverser = make_traverser({'context': resource, 'view_name': ''})
self._registerTraverser(traverser)
def foo():
yield ''
result = self._callFUT(resource, foo())
self.assertEqual(result, resource)
self.assertEqual(resource.request.environ['PATH_INFO'], '/')
def test_self_string_found(self):
resource = DummyContext()
traverser = make_traverser({'context': resource, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, '')
self.assertEqual(result, resource)
self.assertEqual(resource.request.environ['PATH_INFO'], '')
def test_self_tuple_found(self):
resource = DummyContext()
traverser = make_traverser({'context': resource, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, ())
self.assertEqual(result, resource)
self.assertEqual(resource.request.environ['PATH_INFO'], '')
def test_relative_string_found(self):
resource = DummyContext()
baz = DummyContext()
traverser = make_traverser({'context': baz, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, 'baz')
self.assertEqual(result, baz)
self.assertEqual(resource.request.environ['PATH_INFO'], 'baz')
def test_relative_tuple_found(self):
resource = DummyContext()
baz = DummyContext()
traverser = make_traverser({'context': baz, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, ('baz',))
self.assertEqual(result, baz)
self.assertEqual(resource.request.environ['PATH_INFO'], 'baz')
def test_relative_string_notfound(self):
resource = DummyContext()
baz = DummyContext()
traverser = make_traverser({'context': baz, 'view_name': 'bar'})
self._registerTraverser(traverser)
self.assertRaises(KeyError, self._callFUT, resource, 'baz')
self.assertEqual(resource.request.environ['PATH_INFO'], 'baz')
def test_relative_tuple_notfound(self):
resource = DummyContext()
baz = DummyContext()
traverser = make_traverser({'context': baz, 'view_name': 'bar'})
self._registerTraverser(traverser)
self.assertRaises(KeyError, self._callFUT, resource, ('baz',))
self.assertEqual(resource.request.environ['PATH_INFO'], 'baz')
def test_absolute_string_found(self):
root = DummyContext()
resource = DummyContext()
resource.__parent__ = root
resource.__name__ = 'baz'
traverser = make_traverser({'context': root, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, '/')
self.assertEqual(result, root)
self.assertEqual(root.wascontext, True)
self.assertEqual(root.request.environ['PATH_INFO'], '/')
def test_absolute_tuple_found(self):
root = DummyContext()
resource = DummyContext()
resource.__parent__ = root
resource.__name__ = 'baz'
traverser = make_traverser({'context': root, 'view_name': ''})
self._registerTraverser(traverser)
result = self._callFUT(resource, ('',))
self.assertEqual(result, root)
self.assertEqual(root.wascontext, True)
self.assertEqual(root.request.environ['PATH_INFO'], '/')
def test_absolute_string_notfound(self):
root = DummyContext()
resource = DummyContext()
resource.__parent__ = root
resource.__name__ = 'baz'
traverser = make_traverser({'context': root, 'view_name': 'fuz'})
self._registerTraverser(traverser)
self.assertRaises(KeyError, self._callFUT, resource, '/')
self.assertEqual(root.wascontext, True)
self.assertEqual(root.request.environ['PATH_INFO'], '/')
def test_absolute_tuple_notfound(self):
root = DummyContext()
resource = DummyContext()
resource.__parent__ = root
resource.__name__ = 'baz'
traverser = make_traverser({'context': root, 'view_name': 'fuz'})
self._registerTraverser(traverser)
self.assertRaises(KeyError, self._callFUT, resource, ('',))
self.assertEqual(root.wascontext, True)
self.assertEqual(root.request.environ['PATH_INFO'], '/')
def test_absolute_unicode_found(self):
# test for bug wiggy found in wild, traceback stack:
# root = '/%E6%B5%81%E8%A1%8C%E8%B6%8B%E5%8A%BF'
# wiggy's code: section=find_resource(page, root)
# find_resource L76: D = traverse(resource, path)
# traverse L291: return traverser(request)
# __call__ line 568: vpath_tuple = traversal_path(vpath)
# lru_cached line 91: f(*arg)
# traversal_path line 443: path.encode('ascii')
# UnicodeEncodeError: 'ascii' codec can't encode characters in
# position 1-12: ordinal not in range(128)
#
# solution: encode string to ascii in pyramid.traversal.traverse
# before passing it along to webob as path_info
from pyramid.traversal import ResourceTreeTraverser
unprintable = DummyContext()
root = DummyContext(unprintable)
unprintable.__parent__ = root
unprintable.__name__ = text_(
b'/\xe6\xb5\x81\xe8\xa1\x8c\xe8\xb6\x8b\xe5\x8a\xbf', 'utf-8'
)
root.__parent__ = None
root.__name__ = None
traverser = ResourceTreeTraverser
self._registerTraverser(traverser)
result = self._callFUT(
root, text_(b'/%E6%B5%81%E8%A1%8C%E8%B6%8B%E5%8A%BF')
)
self.assertEqual(result, unprintable)
| FindResourceTests |
python | great-expectations__great_expectations | tests/scripts/test_public_api_report.py | {
"start": 28705,
"end": 29692
} | class ____:
def test_instantiate_name_and_filepath(self, tmp_path: pathlib.Path):
path = tmp_path / "test_path.py"
path.touch()
IncludeExcludeDefinition(
reason="reason",
name="name",
filepath=path,
)
def test_instantiate_filepath_only(self, tmp_path: pathlib.Path):
path = tmp_path / "test_path.py"
path.touch()
IncludeExcludeDefinition(reason="reason", filepath=path)
def test_instantiate_name_only(self):
with pytest.raises(ValueError) as exc:
IncludeExcludeDefinition(reason="reason", name="name")
assert "You must provide a filepath if also providing a name" in exc.value.args[0]
def test_instantiate_reason_only(self):
with pytest.raises(ValueError) as exc:
IncludeExcludeDefinition(reason="reason")
assert "You must provide at least a filepath or filepath and name" in exc.value.args[0]
| TestIncludeExcludeDefinition |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 6807,
"end": 10455
} | class ____(MimetypeRenderer):
"""
Base class for all HTML mime type renderers
mime type: 'text/html'
"""
def __init__(
self,
connected=False,
full_html=False,
global_init=False,
config=None,
auto_play=False,
post_script=None,
animation_opts=None,
include_plotlyjs=True,
):
self.config = dict(config) if config else {}
self.auto_play = auto_play
self.connected = connected
self.global_init = global_init
self.full_html = full_html
self.animation_opts = animation_opts
self.post_script = post_script
self.include_plotlyjs = "cdn" if self.connected else include_plotlyjs
def activate(self):
if self.global_init:
if not ipython_display:
raise ValueError(
"The {cls} class requires ipython but it is not installed".format(
cls=self.__class__.__name__
)
)
if self.connected:
script = """\
<script type="text/javascript">
{win_config}
{mathjax_config}
</script>
<script type="module">import \"{plotly_cdn}\"</script>
""".format(
win_config=_window_plotly_config,
mathjax_config=_mathjax_config,
plotly_cdn=plotly_cdn_url().rstrip(".js"),
)
else:
# If not connected then we embed a copy of the plotly.js
# library in the notebook
script = """\
<script type="text/javascript">
{win_config}
{mathjax_config}
</script>
<script>{script}</script>
""".format(
script=get_plotlyjs(),
win_config=_window_plotly_config,
mathjax_config=_mathjax_config,
)
ipython_display.display_html(script, raw=True)
def to_mimebundle(self, fig_dict):
from plotly.io import to_html
include_mathjax = "cdn"
# build post script
post_script = [
"""
var gd = document.getElementById('{plot_id}');
var x = new MutationObserver(function (mutations, observer) {{
var display = window.getComputedStyle(gd).display;
if (!display || display === 'none') {{
console.log([gd, 'removed!']);
Plotly.purge(gd);
observer.disconnect();
}}
}});
// Listen for the removal of the full notebook cells
var notebookContainer = gd.closest('#notebook-container');
if (notebookContainer) {{
x.observe(notebookContainer, {childList: true});
}}
// Listen for the clearing of the current output cell
var outputEl = gd.closest('.output');
if (outputEl) {{
x.observe(outputEl, {childList: true});
}}
"""
]
# Add user defined post script
if self.post_script:
if not isinstance(self.post_script, (list, tuple)):
post_script.append(self.post_script)
else:
post_script.extend(self.post_script)
html = to_html(
fig_dict,
config=self.config,
auto_play=self.auto_play,
include_plotlyjs=self.include_plotlyjs,
include_mathjax=include_mathjax,
post_script=post_script,
full_html=self.full_html,
animation_opts=self.animation_opts,
default_width="100%",
default_height=525,
validate=False,
)
return {"text/html": html}
| HtmlRenderer |
python | sympy__sympy | sympy/physics/mechanics/tests/test_actuator.py | {
"start": 15336,
"end": 22000
} | class ____:
@pytest.fixture(autouse=True)
def _torque_actuator_fixture(self):
self.torque = Symbol('T')
self.N = ReferenceFrame('N')
self.A = ReferenceFrame('A')
self.axis = self.N.z
self.target = RigidBody('target', frame=self.N)
self.reaction = RigidBody('reaction', frame=self.A)
def test_is_actuator_base_subclass(self):
assert issubclass(TorqueActuator, ActuatorBase)
@pytest.mark.parametrize(
'torque',
[
Symbol('T'),
dynamicsymbols('T'),
Symbol('T')**2 + Symbol('T'),
]
)
@pytest.mark.parametrize(
'target_frame, reaction_frame',
[
(target.frame, reaction.frame),
(target, reaction.frame),
(target.frame, reaction),
(target, reaction),
]
)
def test_valid_constructor_with_reaction(
self,
torque,
target_frame,
reaction_frame,
):
instance = TorqueActuator(
torque,
self.axis,
target_frame,
reaction_frame,
)
assert isinstance(instance, TorqueActuator)
assert hasattr(instance, 'torque')
assert isinstance(instance.torque, ExprType)
assert instance.torque == torque
assert hasattr(instance, 'axis')
assert isinstance(instance.axis, Vector)
assert instance.axis == self.axis
assert hasattr(instance, 'target_frame')
assert isinstance(instance.target_frame, ReferenceFrame)
assert instance.target_frame == target.frame
assert hasattr(instance, 'reaction_frame')
assert isinstance(instance.reaction_frame, ReferenceFrame)
assert instance.reaction_frame == reaction.frame
@pytest.mark.parametrize(
'torque',
[
Symbol('T'),
dynamicsymbols('T'),
Symbol('T')**2 + Symbol('T'),
]
)
@pytest.mark.parametrize('target_frame', [target.frame, target])
def test_valid_constructor_without_reaction(self, torque, target_frame):
instance = TorqueActuator(torque, self.axis, target_frame)
assert isinstance(instance, TorqueActuator)
assert hasattr(instance, 'torque')
assert isinstance(instance.torque, ExprType)
assert instance.torque == torque
assert hasattr(instance, 'axis')
assert isinstance(instance.axis, Vector)
assert instance.axis == self.axis
assert hasattr(instance, 'target_frame')
assert isinstance(instance.target_frame, ReferenceFrame)
assert instance.target_frame == target.frame
assert hasattr(instance, 'reaction_frame')
assert instance.reaction_frame is None
@pytest.mark.parametrize('torque', [None, 'T'])
def test_invalid_constructor_torque_not_sympifyable(self, torque):
with pytest.raises(SympifyError):
_ = TorqueActuator(torque, self.axis, self.target)
@pytest.mark.parametrize('axis', [Symbol('a'), dynamicsymbols('a')])
def test_invalid_constructor_axis_not_vector(self, axis):
with pytest.raises(TypeError):
_ = TorqueActuator(self.torque, axis, self.target, self.reaction)
@pytest.mark.parametrize(
'frames',
[
(None, ReferenceFrame('child')),
(ReferenceFrame('parent'), True),
(None, RigidBody('child')),
(RigidBody('parent'), True),
]
)
def test_invalid_constructor_frames_not_frame(self, frames):
with pytest.raises(TypeError):
_ = TorqueActuator(self.torque, self.axis, *frames)
@pytest.mark.parametrize(
'property_name, fixture_attr_name',
[
('torque', 'torque'),
('axis', 'axis'),
('target_frame', 'target'),
('reaction_frame', 'reaction'),
]
)
def test_properties_are_immutable(self, property_name, fixture_attr_name):
actuator = TorqueActuator(
self.torque,
self.axis,
self.target,
self.reaction,
)
value = getattr(self, fixture_attr_name)
with pytest.raises(AttributeError):
setattr(actuator, property_name, value)
def test_repr_without_reaction(self):
actuator = TorqueActuator(self.torque, self.axis, self.target)
expected = 'TorqueActuator(T, axis=N.z, target_frame=N)'
assert repr(actuator) == expected
def test_repr_with_reaction(self):
actuator = TorqueActuator(
self.torque,
self.axis,
self.target,
self.reaction,
)
expected = 'TorqueActuator(T, axis=N.z, target_frame=N, reaction_frame=A)'
assert repr(actuator) == expected
def test_at_pin_joint_constructor(self):
pin_joint = PinJoint(
'pin',
self.target,
self.reaction,
coordinates=dynamicsymbols('q'),
speeds=dynamicsymbols('u'),
parent_interframe=self.N,
joint_axis=self.axis,
)
instance = TorqueActuator.at_pin_joint(self.torque, pin_joint)
assert isinstance(instance, TorqueActuator)
assert hasattr(instance, 'torque')
assert isinstance(instance.torque, ExprType)
assert instance.torque == self.torque
assert hasattr(instance, 'axis')
assert isinstance(instance.axis, Vector)
assert instance.axis == self.axis
assert hasattr(instance, 'target_frame')
assert isinstance(instance.target_frame, ReferenceFrame)
assert instance.target_frame == self.A
assert hasattr(instance, 'reaction_frame')
assert isinstance(instance.reaction_frame, ReferenceFrame)
assert instance.reaction_frame == self.N
def test_at_pin_joint_pin_joint_not_pin_joint_invalid(self):
with pytest.raises(TypeError):
_ = TorqueActuator.at_pin_joint(self.torque, Symbol('pin'))
def test_to_loads_without_reaction(self):
actuator = TorqueActuator(self.torque, self.axis, self.target)
expected = [
(self.N, self.torque*self.axis),
]
assert actuator.to_loads() == expected
def test_to_loads_with_reaction(self):
actuator = TorqueActuator(
self.torque,
self.axis,
self.target,
self.reaction,
)
expected = [
(self.N, self.torque*self.axis),
(self.A, - self.torque*self.axis),
]
assert actuator.to_loads() == expected
| TestTorqueActuator |
python | pytest-dev__pytest | testing/python/fixtures.py | {
"start": 36216,
"end": 36668
} | class ____:
@pytest.fixture(scope="session")
def session_request(self, request):
return request
@pytest.mark.parametrize("name", ["path", "module"])
def test_session_scoped_unavailable_attributes(self, session_request, name):
with pytest.raises(
AttributeError,
match=f"{name} not available in session-scoped context",
):
getattr(session_request, name)
| TestRequestSessionScoped |
python | google__pytype | pytype/pyi/modules.py | {
"start": 539,
"end": 4001
} | class ____:
"""Module and package details."""
def __init__(self, filename, module_name):
self.filename = filename
self.module_name = module_name
is_package = file_utils.is_pyi_directory_init(filename)
self.package_name = module_utils.get_package_name(module_name, is_package)
self.parent_name = module_utils.get_package_name(self.package_name, False)
def _qualify_name_with_special_dir(self, orig_name):
"""Handle the case of '.' and '..' as package names."""
if "__PACKAGE__." in orig_name:
# Generated from "from . import foo" - see parser.yy
prefix, _, name = orig_name.partition("__PACKAGE__.")
if prefix:
raise _ParseError(f"Cannot resolve import: {orig_name}")
return f"{self.package_name}.{name}"
elif "__PARENT__." in orig_name:
# Generated from "from .. import foo" - see parser.yy
prefix, _, name = orig_name.partition("__PARENT__.")
if prefix:
raise _ParseError(f"Cannot resolve import: {orig_name}")
if not self.parent_name:
raise _ParseError(
f"Cannot resolve relative import ..: Package {self.package_name} "
"has no parent"
)
return f"{self.parent_name}.{name}"
else:
return None
def qualify_name(self, orig_name):
"""Qualify an import name."""
if not self.package_name:
return orig_name
rel_name = self._qualify_name_with_special_dir(orig_name)
if rel_name:
return rel_name
if orig_name.startswith("."):
name = module_utils.get_absolute_name(self.package_name, orig_name)
if name is None:
raise _ParseError(
f"Cannot resolve relative import {orig_name.rsplit('.', 1)[0]}"
)
return name
return orig_name
def process_import(self, item):
"""Process 'import a, b as c, ...'."""
if isinstance(item, tuple):
name, new_name = item
else:
name = new_name = item
if name == new_name == "__builtin__":
# 'import __builtin__' should be completely ignored; this is the PY2 name
# of the builtins module.
return None
module_name = self.qualify_name(name)
as_name = self.qualify_name(new_name)
t = pytd.Module(name=as_name, module_name=module_name)
return Import(pytd_node=t, name=name, new_name=new_name)
def process_from_import(self, from_package, item):
"""Process 'from a.b.c import d, ...'."""
if isinstance(item, tuple):
name, new_name = item
else:
name = new_name = item
qualified_name = self.qualify_name(f"{from_package}.{name}")
# We should ideally not need this check, but we have typing
# special-cased in some places.
if not qualified_name.startswith("typing.") and name != "*":
# Mark this as an externally imported type, so that AddNamePrefix
# does not prefix it with the current package name.
qualified_name = parser_constants.EXTERNAL_NAME_PREFIX + qualified_name
t = pytd.NamedType(qualified_name)
if name == "*":
# A star import is stored as
# 'imported_mod.* = imported_mod.*'. The imported module needs to be
# in the alias name so that multiple star imports are handled
# properly. LookupExternalTypes() replaces the alias with the
# contents of the imported module.
assert new_name == name
new_name = t.name
return Import(
pytd_node=t, name=name, new_name=new_name, qualified_name=qualified_name
)
| Module |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0014_add-state-tracking.py | {
"start": 100,
"end": 775
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0013_add-container-limits"),
]
operations = [
migrations.AddField(
model_name="project",
name="has_valid_clone",
field=models.BooleanField(
default=False, help_text="This project has been successfully cloned"
),
),
migrations.AddField(
model_name="project",
name="has_valid_webhook",
field=models.BooleanField(
default=False, help_text="This project has been build with a webhook"
),
),
]
| Migration |
python | sphinx-doc__sphinx | sphinx/util/inspect.py | {
"start": 20266,
"end": 21236
} | class ____:
"""Pseudo typing class for :confval:`autodoc_type_aliases`.
This avoids the error on evaluating the type inside :func:`typing.get_type_hints()`.
"""
def __init__(self, name: str) -> None:
self.name = name
def __call__(self) -> None:
# Dummy method to imitate special typing classes
pass
def __eq__(self, other: object) -> bool:
return self.name == other
def __hash__(self) -> int:
return hash(self.name)
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.name!r})'
def __or__(self, other: Any) -> Any:
# When evaluating type hints, our forward ref can appear in type expressions,
# i.e. `Alias | None`. This means it needs to support ``__or__`` and ``__ror__``.
return typing.Union[self, other] # NoQA: UP007
def __ror__(self, other: Any) -> Any:
return typing.Union[other, self] # NoQA: UP007
| TypeAliasForwardRef |
python | django__django | django/db/migrations/serializer.py | {
"start": 12120,
"end": 14835
} | class ____:
_registry = {
# Some of these are order-dependent.
frozenset: FrozensetSerializer,
list: SequenceSerializer,
set: SetSerializer,
tuple: TupleSerializer,
dict: DictionarySerializer,
models.Choices: ChoicesSerializer,
enum.Enum: EnumSerializer,
datetime.datetime: DatetimeDatetimeSerializer,
(datetime.date, datetime.timedelta, datetime.time): DateTimeSerializer,
SettingsReference: SettingsReferenceSerializer,
float: FloatSerializer,
(bool, int, types.NoneType, bytes, str, range): BaseSimpleSerializer,
decimal.Decimal: DecimalSerializer,
(functools.partial, functools.partialmethod): FunctoolsPartialSerializer,
FUNCTION_TYPES: FunctionTypeSerializer,
types.GenericAlias: GenericAliasSerializer,
collections.abc.Iterable: IterableSerializer,
(COMPILED_REGEX_TYPE, RegexObject): RegexSerializer,
uuid.UUID: UUIDSerializer,
pathlib.PurePath: PathSerializer,
os.PathLike: PathLikeSerializer,
zoneinfo.ZoneInfo: ZoneInfoSerializer,
DatabaseOnDelete: DatabaseOnDeleteSerializer,
}
@classmethod
def register(cls, type_, serializer):
if not issubclass(serializer, BaseSerializer):
raise ValueError(
"'%s' must inherit from 'BaseSerializer'." % serializer.__name__
)
cls._registry[type_] = serializer
@classmethod
def unregister(cls, type_):
cls._registry.pop(type_)
def serializer_factory(value):
if isinstance(value, Promise):
value = str(value)
elif isinstance(value, LazyObject):
# The unwrapped value is returned as the first item of the arguments
# tuple.
value = value.__reduce__()[1][0]
if isinstance(value, models.Field):
return ModelFieldSerializer(value)
if isinstance(value, models.manager.BaseManager):
return ModelManagerSerializer(value)
if isinstance(value, Operation):
return OperationSerializer(value)
if isinstance(value, type):
return TypeSerializer(value)
# Anything that knows how to deconstruct itself.
if hasattr(value, "deconstruct"):
return DeconstructibleSerializer(value)
for type_, serializer_cls in Serializer._registry.items():
if isinstance(value, type_):
return serializer_cls(value)
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
| Serializer |
python | pypa__pip | src/pip/_vendor/cachecontrol/caches/redis_cache.py | {
"start": 296,
"end": 1386
} | class ____(BaseCache):
def __init__(self, conn: Redis[bytes]) -> None:
self.conn = conn
def get(self, key: str) -> bytes | None:
return self.conn.get(key)
def set(
self, key: str, value: bytes, expires: int | datetime | None = None
) -> None:
if not expires:
self.conn.set(key, value)
elif isinstance(expires, datetime):
now_utc = datetime.now(timezone.utc)
if expires.tzinfo is None:
now_utc = now_utc.replace(tzinfo=None)
delta = expires - now_utc
self.conn.setex(key, int(delta.total_seconds()), value)
else:
self.conn.setex(key, expires, value)
def delete(self, key: str) -> None:
self.conn.delete(key)
def clear(self) -> None:
"""Helper for clearing all the keys in a database. Use with
caution!"""
for key in self.conn.keys():
self.conn.delete(key)
def close(self) -> None:
"""Redis uses connection pooling, no need to close the connection."""
pass
| RedisCache |
python | Textualize__textual | docs/examples/guide/widgets/hello01.py | {
"start": 223,
"end": 372
} | class ____(App):
def compose(self) -> ComposeResult:
yield Hello()
if __name__ == "__main__":
app = CustomApp()
app.run()
| CustomApp |
python | doocs__leetcode | solution/0900-0999/0958.Check Completeness of a Binary Tree/Solution.py | {
"start": 192,
"end": 499
} | class ____:
def isCompleteTree(self, root: TreeNode) -> bool:
q = deque([root])
while q:
node = q.popleft()
if node is None:
break
q.append(node.left)
q.append(node.right)
return all(node is None for node in q)
| Solution |
python | automl__auto-sklearn | test/test_pipeline/implementations/test_CategoryShift.py | {
"start": 136,
"end": 1181
} | class ____(unittest.TestCase):
def test_dense(self):
X = np.random.randint(0, 255, (3, 4))
Y = CategoryShift().fit_transform(X)
self.assertTrue((Y == X + 3).all())
def test_sparse(self):
X = scipy.sparse.csc_matrix(
([1, 2, 0, 4], ([0, 1, 2, 1], [3, 2, 1, 0])), shape=(3, 4)
)
Y = CategoryShift().fit_transform(X)
X.data += 3
self.assertTrue((Y.todense() == X.todense()).all())
# Check if the sparsity stays the same before and after the transformation
self.assertEqual(X.data.shape, Y.data.shape)
self.assertTrue((X.indices == Y.indices).all())
self.assertTrue((X.indptr == Y.indptr).all())
def test_negative(self):
X = np.array([[-1, 2], [3, 4]])
with self.assertRaises(ValueError):
CategoryShift().fit_transform(X)
def test_string(self):
X = np.array([["a", "b"], ["c", "d"]])
with self.assertRaises(ValueError):
CategoryShift().fit_transform(X)
| CategoryShiftTest |
python | tensorflow__tensorflow | tensorflow/python/keras/losses.py | {
"start": 19016,
"end": 23124
} | class ____(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
- `y_pred` (predicted value): This is the model's prediction, i.e, a single
floating-point value which either represents a
[logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
`from_logits=False`).
**Recommended Usage:** (set `from_logits=True`)
With `tf.keras` API:
```python
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
....
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = [0, 1, 0, 0]
>>> y_pred = [-18.6, 0.51, 2.94, -12.8]
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred).numpy()
0.865
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
>>> # Using default 'auto'/'sum_over_batch_size' reduction type.
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred).numpy()
0.865
>>> # Using 'sample_weight' attribute
>>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
0.243
>>> # Using 'sum' reduction` type.
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
... reduction=tf.keras.losses.Reduction.SUM)
>>> bce(y_true, y_pred).numpy()
1.730
>>> # Using 'none' reduction type.
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
... reduction=tf.keras.losses.Reduction.NONE)
>>> bce(y_true, y_pred).numpy()
array([0.235, 1.496], dtype=float32)
**Default Usage:** (set `from_logits=False`)
>>> # Make the following updates to the above "Recommended Usage" section
>>> # 1. Set `from_logits=False`
>>> tf.keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
>>> # 2. Update `y_pred` to use probabilities instead of logits
>>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
axis=-1,
reduction=losses_utils.ReductionV2.AUTO,
name='binary_crossentropy'):
"""Initializes `BinaryCrossentropy` instance.
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
we compute the loss between the predicted labels and a smoothed version
of the true labels, where the smoothing squeezes the labels towards 0.5.
Larger values of `label_smoothing` correspond to heavier smoothing.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to -1.
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Name for the op. Defaults to 'binary_crossentropy'.
"""
super().__init__(
binary_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis)
self.from_logits = from_logits
| BinaryCrossentropy |
python | scrapy__scrapy | tests/spiders.py | {
"start": 1020,
"end": 1754
} | class ____(MetaSpider):
name = "follow"
link_extractor = LinkExtractor()
def __init__(
self, total=10, show=20, order="rand", maxlatency=0.0, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.urls_visited = []
self.times = []
qargs = {"total": total, "show": show, "order": order, "maxlatency": maxlatency}
url = self.mockserver.url(f"/follow?{urlencode(qargs, doseq=True)}")
self.start_urls = [url]
def parse(self, response):
self.urls_visited.append(response.url)
self.times.append(time.time())
for link in self.link_extractor.extract_links(response):
yield Request(link.url, callback=self.parse)
| FollowAllSpider |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_events_monitors_test.py | {
"start": 9455,
"end": 21089
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def testInfNanMonitorStartsWithEmptyAlerts(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
self.assertEmpty(monitor.alerts())
def testInfNanMonitorOnExecutionUnderCurtHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234, 1, "FooOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01", ["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
debug_tensor_values=[[-1, 0], [-1, 1]]) # [tensor_id, any_inf_nan].
monitor.on_execution(50, execution)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 1)
# The four fields below are unavailable under CURT_HEALTH mode by design.
self.assertIsNone(alert.size)
self.assertIsNone(alert.num_neg_inf)
self.assertIsNone(alert.num_pos_inf)
self.assertIsNone(alert.num_nan)
self.assertEqual(alert.execution_index, 50)
self.assertIsNone(alert.graph_execution_trace_index)
@parameterized.named_parameters(
("ConciseHealth",
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
# [tensor_id, size, num_neg_inf, num_pos_inf, num_nan].
[[-1, 10, 1, 2, 3],
[-1, 100, 0, 0, 0]]),
("FullHealth",
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
# [tensor_id, device_id, dtype, rank, element_count,
# neg_inf_count, pos_inf_count, nan_count,
# neg_finite_count, zero_count, pos_finite_count].
[[-1, -1, 1, 1, 10, 1, 2, 3, 0, 0, 0],
[-1, -1, 1, 1, 100, 0, 0, 0, 10, 30, 60]]),
)
def testInfNanMonitorOnExecutionUnderHealthMode(self,
tensor_debug_mode,
debug_tensor_values):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234, 1, "BarOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01",
["a1", "b2", "e3"],
tensor_debug_mode,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
debug_tensor_values=debug_tensor_values)
monitor.on_execution(60, execution)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "BarOp")
self.assertEqual(alert.output_slot, 0)
self.assertEqual(alert.size, 10)
self.assertEqual(alert.num_neg_inf, 1)
self.assertEqual(alert.num_pos_inf, 2)
self.assertEqual(alert.num_nan, 3)
self.assertEqual(alert.execution_index, 60)
self.assertIsNone(alert.graph_execution_trace_index)
@parameterized.named_parameters(
("Shape",
debug_event_pb2.TensorDebugMode.SHAPE,
# [tensor_id, dtype, rank, element_cont, ...shape_truncate_6]
[[-1, 1, 2, 6, 3, 2, 0, 0, 0, 0],
[-1, 10, 1, 7, 7, 0, 0, 0, 0, 0]]),
)
def testInfNanMonitorOnExecutionUnderModeWithNoInfNanInfo(
self,
tensor_debug_mode,
debug_tensor_values):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234, 1, "BarOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01",
["a1", "b2", "e3"],
tensor_debug_mode,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
debug_tensor_values=debug_tensor_values)
monitor.on_execution(60, execution)
self.assertEmpty(monitor.alerts())
@parameterized.named_parameters(
("FloatsScalarWithInfAndNan", np.inf, np.float32, 1, 0, 1, 0),
("Floats2DWithInfAndNan", [[0, np.nan, np.nan, -np.inf]
], np.float32, 4, 1, 0, 2),
("Floats1DWithoutInfOrNan", [0, -1e6, 1e6, 9e5], np.float32, 4, 0, 0, 0),
("Integers", [[0, 1000, -200, -300]], np.int32, 4, 0, 0, 0),
("Booleans", [False, True, False, False], np.int32, 4, 0, 0, 0),
)
def testInfNanMonitorOnExecutionUnderFullTensorModeWorks(
self, tensor_value, dtype, expected_size, expected_num_neg_inf,
expected_num_pos_inf, expected_num_nan):
mock_reader = test.mock.MagicMock()
mock_reader.execution_to_tensor_values.return_value = [
np.array([[0.0, -1.0, 1.0]]),
np.array(tensor_value, dtype=dtype)
]
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234,
1,
"__inference_bar_function_1234",
output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01", ["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.FULL_TENSOR,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78])
monitor.on_execution(70, execution)
if expected_num_neg_inf or expected_num_pos_inf or expected_num_nan:
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "__inference_bar_function_1234")
self.assertEqual(alert.output_slot, 1)
self.assertEqual(alert.size, expected_size)
self.assertEqual(alert.num_neg_inf, expected_num_neg_inf)
self.assertEqual(alert.num_pos_inf, expected_num_pos_inf)
self.assertEqual(alert.num_nan, expected_num_nan)
self.assertEqual(alert.execution_index, 70)
self.assertIsNone(alert.graph_execution_trace_index, 70)
else:
self.assertEmpty(monitor.alerts())
def testInfNaNMonitorOnGraphExecutionTraceCurtHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 1, "FooOp", "FooOp_1", 2, "g1")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g0", "g1"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[9, 1]) # [tensor_id, any_inf_nan].
monitor.on_graph_execution_trace(55, trace)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 2)
# The four fields below are unavailable under CURT_HEALTH mode by design.
self.assertIsNone(alert.size)
self.assertIsNone(alert.num_neg_inf)
self.assertIsNone(alert.num_pos_inf)
self.assertIsNone(alert.num_nan)
self.assertIsNone(alert.execution_index)
self.assertEqual(alert.graph_execution_trace_index, 55)
def testInfNaNMonitorOnGraphExecutionTraceConciseHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 1, "FooOp", "FooOp_1", 2, "g1")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest,
["g0", "g1"],
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
# [tensor_id, size, num_neg_inf, num_pos_inf, num_nan].
debug_tensor_value=[9, 100, 3, 2, 1])
monitor.on_graph_execution_trace(55, trace)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 2)
self.assertEqual(alert.size, 100)
self.assertEqual(alert.num_neg_inf, 3)
self.assertEqual(alert.num_pos_inf, 2)
self.assertEqual(alert.num_nan, 1)
self.assertEqual(alert.graph_execution_trace_index, 55)
@parameterized.named_parameters(
("FloatsScalarWithInfAndNan", np.inf, np.float32, 1, 0, 1, 0),
("Floats2DWithInfAndNan", [[0, np.nan, np.nan, -np.inf]
], np.float32, 4, 1, 0, 2),
("Floats1DWithoutInfOrNan", [0, -1e6, 1e6, 9e5], np.float32, 4, 0, 0, 0),
("Integers", [[0, 1000, -200, -300]], np.int32, 4, 0, 0, 0),
("Booleans", [False, True, False, False], np.int32, 4, 0, 0, 0),
)
def testInfNanMonitorOnGraphExecutionTraceUnderFullTensorModeWorks(
self, tensor_value, dtype, expected_size, expected_num_neg_inf,
expected_num_pos_inf, expected_num_nan):
mock_reader = test.mock.MagicMock()
mock_reader.graph_execution_trace_to_tensor_value.return_value = np.array(
tensor_value, dtype=dtype)
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 1, "BazOp", "name_scope_3/BazOp_1", 2, "g1")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g0", "g1"], debug_event_pb2.TensorDebugMode.FULL_TENSOR)
monitor.on_graph_execution_trace(80, trace)
if expected_num_neg_inf or expected_num_pos_inf or expected_num_nan:
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "BazOp")
self.assertEqual(alert.output_slot, 2)
self.assertEqual(alert.size, expected_size)
self.assertEqual(alert.num_neg_inf, expected_num_neg_inf)
self.assertEqual(alert.num_pos_inf, expected_num_pos_inf)
self.assertEqual(alert.num_nan, expected_num_nan)
self.assertIsNone(alert.execution_index)
self.assertEqual(alert.graph_execution_trace_index, 80)
else:
self.assertEmpty(monitor.alerts())
def testLimitingInfNanMonitorAlertCountWorks(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader, limit=3)
for i in range(10):
execution_digest = debug_events_reader.ExecutionDigest(
i * 1000, 1, "FooOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01", ["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
debug_tensor_values=[[-1, 0], [-1, 1]]) # [tensor_id, any_inf_nan].
monitor.on_execution(i, execution)
alerts = monitor.alerts()
self.assertLen(alerts, 3)
for i, alert in enumerate(alerts):
self.assertEqual(alert.wall_time, i * 1000)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 1)
# The four fields below are unavailable under CURT_HEALTH mode by design.
self.assertIsNone(alert.size)
self.assertIsNone(alert.num_neg_inf)
self.assertIsNone(alert.num_pos_inf)
self.assertIsNone(alert.num_nan)
self.assertEqual(alert.execution_index, i)
self.assertIsNone(alert.graph_execution_trace_index)
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| InfNanMonitorTest |
python | tensorflow__tensorflow | tensorflow/python/saved_model/model_utils/export_output.py | {
"start": 14991,
"end": 15341
} | class ____(_SupervisedOutput):
"""Represents the output of a supervised eval process.
This class generates the appropriate signature def for exporting
eval output by type-checking and wrapping loss, predictions, and metrics
values.
"""
def _get_signature_def_fn(self):
return signature_def_utils.supervised_eval_signature_def
| EvalOutput |
python | tensorflow__tensorflow | tensorflow/python/training/queue_runner_test.py | {
"start": 1550,
"end": 15207
} | class ____(test.TestCase):
def testBasic(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
threads = qr.create_threads(sess)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0"])
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, self.evaluate(var))
def testTwoOps(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var0 = variable_v1.VariableV1(zero64)
count_up_to_3 = var0.count_up_to(3)
var1 = variable_v1.VariableV1(zero64)
count_up_to_30 = var1.count_up_to(30)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
qr = queue_runner_impl.QueueRunner(queue, [count_up_to_3, count_up_to_30])
threads = qr.create_threads(sess)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-CountUpTo_1:0"])
self.evaluate(variables.global_variables_initializer())
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
self.assertEqual(3, self.evaluate(var0))
self.assertEqual(30, self.evaluate(var1))
def testExceptionsCaptured(self):
with self.cached_session() as sess:
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
qr = queue_runner_impl.QueueRunner(queue, [_MockOp("i fail"),
_MockOp("so fail")])
threads = qr.create_threads(sess)
self.evaluate(variables.global_variables_initializer())
for t in threads:
t.start()
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(2, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
self.assertTrue("Operation not in the graph" in str(exceptions[1]))
def testRealDequeueEnqueue(self):
with self.cached_session() as sess:
q0 = data_flow_ops.FIFOQueue(3, dtypes.float32)
enqueue0 = q0.enqueue((10.0,))
close0 = q0.close()
q1 = data_flow_ops.FIFOQueue(30, dtypes.float32)
enqueue1 = q1.enqueue((q0.dequeue(),))
dequeue1 = q1.dequeue()
qr = queue_runner_impl.QueueRunner(q1, [enqueue1])
threads = qr.create_threads(sess)
for t in threads:
t.start()
# Enqueue 2 values, then close queue0.
enqueue0.run()
enqueue0.run()
close0.run()
# Wait for the queue runner to terminate.
for t in threads:
t.join()
# It should have terminated cleanly.
self.assertEqual(0, len(qr.exceptions_raised))
# The 2 values should be in queue1.
self.assertEqual(10.0, self.evaluate(dequeue1))
self.assertEqual(10.0, self.evaluate(dequeue1))
# And queue1 should now be closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError, "is closed"):
self.evaluate(dequeue1)
def testRespectCoordShouldStop(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
# As the coordinator to stop. The queue runner should
# finish immediately.
coord = coordinator.Coordinator()
coord.request_stop()
threads = qr.create_threads(sess, coord)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-close_on_stop"])
for t in threads:
t.start()
coord.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 0.
self.assertEqual(0, self.evaluate(var))
def testRequestStopOnException(self):
with self.cached_session() as sess:
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
qr = queue_runner_impl.QueueRunner(queue, [_MockOp("not an op")])
coord = coordinator.Coordinator()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
# The exception should be re-raised when joining.
with self.assertRaisesRegex(ValueError, "Operation not in the graph"):
coord.join()
def testGracePeriod(self):
with self.cached_session() as sess:
# The enqueue will quickly block.
queue = data_flow_ops.FIFOQueue(2, dtypes.float32)
enqueue = queue.enqueue((10.0,))
dequeue = queue.dequeue()
qr = queue_runner_impl.QueueRunner(queue, [enqueue])
coord = coordinator.Coordinator()
qr.create_threads(sess, coord, start=True)
# Dequeue one element and then request stop.
dequeue.op.run()
time.sleep(0.02)
coord.request_stop()
# We should be able to join because the RequestStop() will cause
# the queue to be closed and the enqueue to terminate.
coord.join(stop_grace_period_secs=1.0)
def testMultipleSessions(self):
with self.cached_session() as sess:
with session.Session() as other_sess:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
coord = coordinator.Coordinator()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
# NOTE that this test does not actually start the threads.
threads = qr.create_threads(sess, coord=coord)
other_threads = qr.create_threads(other_sess, coord=coord)
self.assertEqual(len(threads), len(other_threads))
def testIgnoreMultiStarts(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
coord = coordinator.Coordinator()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
threads = []
# NOTE that this test does not actually start the threads.
threads.extend(qr.create_threads(sess, coord=coord))
new_threads = qr.create_threads(sess, coord=coord)
self.assertEqual([], new_threads)
def testThreads(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to,
_MockOp("bad_op")])
threads = qr.create_threads(sess, start=True)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-bad_op"])
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
def testName(self):
with ops.name_scope("scope"):
queue = data_flow_ops.FIFOQueue(10, dtypes.float32, name="queue")
qr = queue_runner_impl.QueueRunner(queue, [control_flow_ops.no_op()])
self.assertEqual("scope/queue", qr.name)
queue_runner_impl.add_queue_runner(qr)
self.assertEqual(
1, len(ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS, "scope")))
def testStartQueueRunners(self):
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.cached_session() as sess:
init_op.run()
threads = queue_runner_impl.start_queue_runners(sess)
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, self.evaluate(var))
def testStartQueueRunnersRaisesIfNotASession(self):
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.cached_session():
init_op.run()
with self.assertRaisesRegex(TypeError, "tf.Session"):
queue_runner_impl.start_queue_runners("NotASession")
def testStartQueueRunnersIgnoresMonitoredSession(self):
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.cached_session():
init_op.run()
threads = queue_runner_impl.start_queue_runners(
monitored_session.MonitoredSession())
self.assertFalse(threads)
def testStartQueueRunnersNonDefaultGraph(self):
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
graph = ops.Graph()
with graph.as_default():
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variable_v1.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.session(graph=graph) as sess:
init_op.run()
threads = queue_runner_impl.start_queue_runners(sess)
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, self.evaluate(var))
def testQueueRunnerSerializationRoundTrip(self):
graph = ops.Graph()
with graph.as_default():
queue = data_flow_ops.FIFOQueue(10, dtypes.float32, name="queue")
enqueue_op = control_flow_ops.no_op(name="enqueue")
close_op = control_flow_ops.no_op(name="close")
cancel_op = control_flow_ops.no_op(name="cancel")
qr0 = queue_runner_impl.QueueRunner(
queue, [enqueue_op],
close_op,
cancel_op,
queue_closed_exception_types=(errors_impl.OutOfRangeError,
errors_impl.CancelledError))
qr0_proto = queue_runner_impl.QueueRunner.to_proto(qr0)
qr0_recon = queue_runner_impl.QueueRunner.from_proto(qr0_proto)
self.assertEqual("queue", qr0_recon.queue.name)
self.assertEqual(1, len(qr0_recon.enqueue_ops))
self.assertEqual(enqueue_op, qr0_recon.enqueue_ops[0])
self.assertEqual(close_op, qr0_recon.close_op)
self.assertEqual(cancel_op, qr0_recon.cancel_op)
self.assertEqual(
(errors_impl.OutOfRangeError, errors_impl.CancelledError),
qr0_recon.queue_closed_exception_types)
# Assert we reconstruct an OutOfRangeError for QueueRunners
# created before QueueRunnerDef had a queue_closed_exception_types field.
del qr0_proto.queue_closed_exception_types[:]
qr0_legacy_recon = queue_runner_impl.QueueRunner.from_proto(qr0_proto)
self.assertEqual("queue", qr0_legacy_recon.queue.name)
self.assertEqual(1, len(qr0_legacy_recon.enqueue_ops))
self.assertEqual(enqueue_op, qr0_legacy_recon.enqueue_ops[0])
self.assertEqual(close_op, qr0_legacy_recon.close_op)
self.assertEqual(cancel_op, qr0_legacy_recon.cancel_op)
self.assertEqual((errors_impl.OutOfRangeError,),
qr0_legacy_recon.queue_closed_exception_types)
if __name__ == "__main__":
test.main()
| QueueRunnerTest |
python | FactoryBoy__factory_boy | tests/test_base.py | {
"start": 831,
"end": 991
} | class ____(unittest.TestCase):
def test_base_factory(self):
with self.assertRaises(errors.FactoryError):
base.BaseFactory()
| SafetyTestCase |
python | doocs__leetcode | solution/1200-1299/1283.Find the Smallest Divisor Given a Threshold/Solution.py | {
"start": 0,
"end": 268
} | class ____:
def smallestDivisor(self, nums: List[int], threshold: int) -> int:
def f(v: int) -> bool:
v += 1
return sum((x + v - 1) // v for x in nums) <= threshold
return bisect_left(range(max(nums)), True, key=f) + 1
| Solution |
python | jazzband__django-formtools | tests/wizard/test_forms.py | {
"start": 11406,
"end": 11631
} | class ____(TestCase):
def test_init(self):
request = get_request()
testform = SessionWizardView.as_view([('start', Step1)])
self.assertIsInstance(testform(request), TemplateResponse)
| SessionFormTests |
python | pytest-dev__pytest | src/_pytest/nodes.py | {
"start": 4117,
"end": 17164
} | class ____(abc.ABC, metaclass=NodeMeta):
r"""Base class of :class:`Collector` and :class:`Item`, the components of
the test collection tree.
``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the
leaf nodes.
"""
# Implemented in the legacypath plugin.
#: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage
#: for methods not migrated to ``pathlib.Path`` yet, such as
#: :meth:`Item.reportinfo <pytest.Item.reportinfo>`. Will be deprecated in
#: a future release, prefer using :attr:`path` instead.
fspath: LEGACY_PATH
# Use __slots__ to make attribute access faster.
# Note that __dict__ is still available.
__slots__ = (
"__dict__",
"_nodeid",
"_store",
"config",
"name",
"parent",
"path",
"session",
)
def __init__(
self,
name: str,
parent: Node | None = None,
config: Config | None = None,
session: Session | None = None,
fspath: LEGACY_PATH | None = None,
path: Path | None = None,
nodeid: str | None = None,
) -> None:
#: A unique name within the scope of the parent node.
self.name: str = name
#: The parent collector node.
self.parent = parent
if config:
#: The pytest config object.
self.config: Config = config
else:
if not parent:
raise TypeError("config or parent must be provided")
self.config = parent.config
if session:
#: The pytest session this node is part of.
self.session: Session = session
else:
if not parent:
raise TypeError("session or parent must be provided")
self.session = parent.session
if path is None and fspath is None:
path = getattr(parent, "path", None)
#: Filesystem path where this node was collected from (can be None).
self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath)
# The explicit annotation is to avoid publicly exposing NodeKeywords.
#: Keywords/markers collected from all scopes.
self.keywords: MutableMapping[str, Any] = NodeKeywords(self)
#: The marker objects belonging to this node.
self.own_markers: list[Mark] = []
#: Allow adding of extra keywords to use for matching.
self.extra_keyword_matches: set[str] = set()
if nodeid is not None:
assert "::()" not in nodeid
self._nodeid = nodeid
else:
if not self.parent:
raise TypeError("nodeid or parent must be provided")
self._nodeid = self.parent.nodeid + "::" + self.name
#: A place where plugins can store information on the node for their
#: own use.
self.stash: Stash = Stash()
# Deprecated alias. Was never public. Can be removed in a few releases.
self._store = self.stash
@classmethod
def from_parent(cls, parent: Node, **kw) -> Self:
"""Public constructor for Nodes.
This indirection got introduced in order to enable removing
the fragile logic from the node constructors.
Subclasses can use ``super().from_parent(...)`` when overriding the
construction.
:param parent: The parent node of this Node.
"""
if "config" in kw:
raise TypeError("config is not a valid argument for from_parent")
if "session" in kw:
raise TypeError("session is not a valid argument for from_parent")
return cls._create(parent=parent, **kw)
@property
def ihook(self) -> pluggy.HookRelay:
"""fspath-sensitive hook proxy used to call pytest hooks."""
return self.session.gethookproxy(self.path)
def __repr__(self) -> str:
return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
def warn(self, warning: Warning) -> None:
"""Issue a warning for this Node.
Warnings will be displayed after the test session, unless explicitly suppressed.
:param Warning warning:
The warning instance to issue.
:raises ValueError: If ``warning`` instance is not a subclass of Warning.
Example usage:
.. code-block:: python
node.warn(PytestWarning("some message"))
node.warn(UserWarning("some message"))
.. versionchanged:: 6.2
Any subclass of :class:`Warning` is now accepted, rather than only
:class:`PytestWarning <pytest.PytestWarning>` subclasses.
"""
# enforce type checks here to avoid getting a generic type error later otherwise.
if not isinstance(warning, Warning):
raise ValueError(
f"warning must be an instance of Warning or subclass, got {warning!r}"
)
path, lineno = get_fslocation_from_item(self)
assert lineno is not None
warnings.warn_explicit(
warning,
category=None,
filename=str(path),
lineno=lineno + 1,
)
# Methods for ordering nodes.
@property
def nodeid(self) -> str:
"""A ::-separated string denoting its collection tree address."""
return self._nodeid
def __hash__(self) -> int:
return hash(self._nodeid)
def setup(self) -> None:
pass
def teardown(self) -> None:
pass
def iter_parents(self) -> Iterator[Node]:
"""Iterate over all parent collectors starting from and including self
up to the root of the collection tree.
.. versionadded:: 8.1
"""
parent: Node | None = self
while parent is not None:
yield parent
parent = parent.parent
def listchain(self) -> list[Node]:
"""Return a list of all parent collectors starting from the root of the
collection tree down to and including self."""
chain = []
item: Node | None = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker: str | MarkDecorator, append: bool = True) -> None:
"""Dynamically add a marker object to the node.
:param marker:
The marker.
:param append:
Whether to append the marker, or prepend it.
"""
from _pytest.mark import MARK_GEN
if isinstance(marker, MarkDecorator):
marker_ = marker
elif isinstance(marker, str):
marker_ = getattr(MARK_GEN, marker)
else:
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker_.name] = marker_
if append:
self.own_markers.append(marker_.mark)
else:
self.own_markers.insert(0, marker_.mark)
def iter_markers(self, name: str | None = None) -> Iterator[Mark]:
"""Iterate over all markers of the node.
:param name: If given, filter the results by the name attribute.
:returns: An iterator of the markers of the node.
"""
return (x[1] for x in self.iter_markers_with_node(name=name))
def iter_markers_with_node(
self, name: str | None = None
) -> Iterator[tuple[Node, Mark]]:
"""Iterate over all markers of the node.
:param name: If given, filter the results by the name attribute.
:returns: An iterator of (node, mark) tuples.
"""
for node in self.iter_parents():
for mark in node.own_markers:
if name is None or getattr(mark, "name", None) == name:
yield node, mark
@overload
def get_closest_marker(self, name: str) -> Mark | None: ...
@overload
def get_closest_marker(self, name: str, default: Mark) -> Mark: ...
def get_closest_marker(self, name: str, default: Mark | None = None) -> Mark | None:
"""Return the first marker matching the name, from closest (for
example function) to farther level (for example module level).
:param default: Fallback return value if no marker was found.
:param name: Name to filter by.
"""
return next(self.iter_markers(name=name), default)
def listextrakeywords(self) -> set[str]:
"""Return a set of all extra keywords in self and any parents."""
extra_keywords: set[str] = set()
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self) -> list[str]:
return [x.name for x in self.listchain()]
def addfinalizer(self, fin: Callable[[], object]) -> None:
"""Register a function to be called without arguments when this node is
finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls: type[_NodeType]) -> _NodeType | None:
"""Get the closest parent node (including self) which is an instance of
the given class.
:param cls: The node class to search for.
:returns: The node, if found.
"""
for node in self.iter_parents():
if isinstance(node, cls):
return node
return None
def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
return excinfo.traceback
def _repr_failure_py(
self,
excinfo: ExceptionInfo[BaseException],
style: TracebackStyle | None = None,
) -> TerminalRepr:
from _pytest.fixtures import FixtureLookupError
if isinstance(excinfo.value, ConftestImportFailure):
excinfo = ExceptionInfo.from_exception(excinfo.value.cause)
if isinstance(excinfo.value, fail.Exception):
if not excinfo.value.pytrace:
style = "value"
if isinstance(excinfo.value, FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback]
if self.config.getoption("fulltrace", False):
style = "long"
tbfilter = False
else:
tbfilter = self._traceback_filter
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.getoption("tbstyle", "auto") == "short":
style = "short"
else:
style = "long"
if self.config.get_verbosity() > 1:
truncate_locals = False
else:
truncate_locals = True
truncate_args = False if self.config.get_verbosity() > 2 else True
# excinfo.getrepr() formats paths relative to the CWD if `abspath` is False.
# It is possible for a fixture/test to change the CWD while this code runs, which
# would then result in the user seeing confusing paths in the failure message.
# To fix this, if the CWD changed, always display the full absolute path.
# It will be better to just always display paths relative to invocation_dir, but
# this requires a lot of plumbing (#6428).
try:
abspath = Path(os.getcwd()) != self.config.invocation_params.dir
except OSError:
abspath = True
return excinfo.getrepr(
funcargs=True,
abspath=abspath,
showlocals=self.config.getoption("showlocals", False),
style=style,
tbfilter=tbfilter,
truncate_locals=truncate_locals,
truncate_args=truncate_args,
)
def repr_failure(
self,
excinfo: ExceptionInfo[BaseException],
style: TracebackStyle | None = None,
) -> str | TerminalRepr:
"""Return a representation of a collection or test failure.
.. seealso:: :ref:`non-python tests`
:param excinfo: Exception information for the failure.
"""
return self._repr_failure_py(excinfo, style)
def get_fslocation_from_item(node: Node) -> tuple[str | Path, int | None]:
"""Try to extract the actual location from a node, depending on available attributes:
* "location": a pair (path, lineno)
* "obj": a Python object that the node wraps.
* "path": just a path
:rtype: A tuple of (str|Path, int) with filename and 0-based line number.
"""
# See Item.location.
location: tuple[str, int | None, str] | None = getattr(node, "location", None)
if location is not None:
return location[:2]
obj = getattr(node, "obj", None)
if obj is not None:
return getfslineno(obj)
return getattr(node, "path", "unknown location"), -1
| Node |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/runtime_wrappers.py | {
"start": 66474,
"end": 66961
} | class ____:
bw_module: Callable
placeholder_list: list[Any]
saved_context: Optional[TracingContext]
saved_compile_context: Optional[CompileContext]
# On an AOT Autograd cache hit, we already have a lowered backward, so there is usually
# no need to keep information around for a new lazy compilation. Except for compiled autograd,
# which wants to retrace this backward into a larger graph, and it needs the graph module to do so.
@dataclass
| AutogradLazyBackwardCompileInfo |
python | spyder-ide__spyder | spyder/plugins/editor/api/editorextension.py | {
"start": 689,
"end": 4355
} | class ____(object):
"""
Base class for editor extensions.
An extension is a "thing" that can be installed on an editor to add new
behaviours or to modify its appearance.
A panel (model child class) is added to an editor by using PanelsManager,
:meth:`spyder.plugins.editor.widgets.codeeditor.CodeEditor.panels.append`.
Subclasses may/should override the following methods:
- :meth:`spyder.api.EditorExtension.on_install`
- :meth:`spyder.api.EditorExtension.on_uninstall`
- :meth:`spyder.api.EditorExtension.on_state_changed`
..warning: The editor extension will be identified by its class name, this
means that **there cannot be two editor extensions of the same type on the
same editor instance!**
"""
@property
def editor(self):
"""
Returns a reference to the parent code editor widget.
**READ ONLY**
:rtype: spyder.plugins.editor.widgets.codeeditor.CodeEditor
"""
return self._editor
@property
def enabled(self):
"""
Tells if the editor extension is enabled.
:meth:`spyder.api.EditorExtension.on_state_changed` will be called as
soon as the editor extension state changed.
:type: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
if enabled != self._enabled:
self._enabled = enabled
self.on_state_changed(enabled)
def __init__(self):
"""
EditorExtension name/identifier.
:class:`spyder.widgets.sourcecode.CodeEditor` uses that as the
attribute key when you install a editor extension.
"""
self.name = self.__class__.__name__
# EditorExtension description
self.description = self.__doc__
self._enabled = False
self._editor = None
self._on_close = False
def on_install(self, editor):
"""
Installs the extension on the editor.
:param editor: editor widget instance
:type editor: spyder.plugins.editor.widgets.codeeditor.CodeEditor
.. note:: This method is called by editor when you install a
EditorExtension.
You should never call it yourself, even in a subclasss.
.. warning:: Don't forget to call **super** when subclassing
"""
self._editor = editor
self.enabled = True
def on_uninstall(self):
"""Uninstalls the editor extension from the editor."""
self._on_close = True
self.enabled = False
self._editor = None
def on_state_changed(self, state):
"""
Called when the enable state has changed.
This method does not do anything, you may override it if you need
to connect/disconnect to the editor's signals (connect when state is
true and disconnect when it is false).
:param state: True = enabled, False = disabled
:type state: bool
"""
pass
def clone_settings(self, original):
"""
Clone the settings from another editor extension (same class).
This method is called when splitting an editor widget.
# TODO at the current estate this is not working
:param original: other editor extension (must be the same class).
.. note:: The base method does not do anything, you must implement
this method for every new editor extension/panel (if you plan on
using the split feature). You should also make sure any properties
will be propagated to the clones.
"""
pass
| EditorExtension |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_fillna.py | {
"start": 349,
"end": 28591
} | class ____:
def test_fillna_dict_inplace_nonunique_columns(self):
df = DataFrame(
{"A": [np.nan] * 3, "B": [NaT, Timestamp(1), NaT], "C": [np.nan, "foo", 2]}
)
df.columns = ["A", "A", "A"]
orig = df[:]
df.fillna({"A": 2}, inplace=True)
# The first and third columns can be set inplace, while the second cannot.
expected = DataFrame(
{"A": [2.0] * 3, "B": [2, Timestamp(1), 2], "C": [2, "foo", 2]}
)
expected.columns = ["A", "A", "A"]
tm.assert_frame_equal(df, expected)
assert not tm.shares_memory(df.iloc[:, 1], orig.iloc[:, 1])
def test_fillna_on_column_view(self):
# GH#46149 avoid unnecessary copies
arr = np.full((40, 50), np.nan)
df = DataFrame(arr, copy=False)
with tm.raises_chained_assignment_error():
df[0].fillna(-1, inplace=True)
assert np.isnan(arr[:, 0]).all()
# i.e. we didn't create a new 49-column block
assert len(df._mgr.blocks) == 1
assert np.shares_memory(df.values, arr)
def test_fillna_datetime(self, datetime_frame):
tf = datetime_frame
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all()
padded = datetime_frame.ffill()
assert np.isnan(padded.loc[padded.index[:5], "A"]).all()
msg = r"missing 1 required positional argument: 'value'"
with pytest.raises(TypeError, match=msg):
datetime_frame.fillna()
def test_fillna_mixed_type(self, float_string_frame, using_infer_string):
mf = float_string_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.ffill()
assert (
result.loc[result.index[-10:], "A"] == result.loc[result.index[-11], "A"]
).all()
assert (result.loc[result.index[5:20], "foo"] == "bar").all()
result = mf.fillna(value=0)
assert (result.loc[result.index[-10:], "A"] == 0).all()
assert (result.loc[result.index[5:20], "foo"] == 0).all()
def test_fillna_mixed_float(self, mixed_float_frame):
# mixed numeric (but no float16)
mf = mixed_float_frame.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype={"C": None})
result = mf.ffill()
_check_mixed_float(result, dtype={"C": None})
def test_fillna_different_dtype(self):
# with different dtype (GH#3386)
df = DataFrame(
[["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]]
)
result = df.fillna({2: "foo"})
expected = DataFrame(
[["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]]
)
# column is originally float (all-NaN) -> filling with string gives object dtype
expected[2] = expected[2].astype("object")
tm.assert_frame_equal(result, expected)
result = df.fillna({2: "foo"}, inplace=True)
assert result is df
tm.assert_frame_equal(df, expected)
def test_fillna_limit_and_value(self):
# limit and value
df = DataFrame(np.random.default_rng(2).standard_normal((10, 3)))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
tm.assert_frame_equal(result, expected)
def test_fillna_datelike(self):
# with datelike
# GH#6344
df = DataFrame(
{
"Date": [NaT, Timestamp("2014-1-1")],
"Date2": [Timestamp("2013-1-1"), NaT],
}
)
expected = df.copy()
expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"])
result = df.fillna(value={"Date": df["Date2"]})
tm.assert_frame_equal(result, expected)
def test_fillna_tzaware(self):
# with timezone
# GH#15855
df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
res = df.ffill()
tm.assert_frame_equal(res, exp)
df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
res = df.bfill()
tm.assert_frame_equal(res, exp)
def test_fillna_tzaware_different_column(self):
# with timezone in another column
# GH#15522
df = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1, 2, np.nan, np.nan],
}
)
result = df.ffill()
expected = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1.0, 2.0, 2.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
def test_na_actions_categorical(self):
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = DataFrame({"cats": cat2, "vals": vals2})
cat3 = Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3})
cat4 = Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
df.fillna(value={"cats": 4, "vals": "c"})
res = df.ffill()
tm.assert_frame_equal(res, df_exp_fill)
# dropna
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes missing values into account
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
df = DataFrame({"cats": c, "vals": [1, 2, 3]})
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_fillna_categorical_nan(self):
# GH#14021
# np.nan should always be a valid filler
cat = Categorical([np.nan, 2, np.nan])
val = Categorical([np.nan, np.nan, np.nan])
df = DataFrame({"cats": cat, "vals": val})
# GH#32950 df.median() is poorly behaved because there is no
# Categorical.median
median = Series({"cats": 2.0, "vals": np.nan})
res = df.fillna(median)
v_exp = [np.nan, np.nan, np.nan]
df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype="category")
tm.assert_frame_equal(res, df_exp)
result = df.cats.fillna(np.nan)
tm.assert_series_equal(result, df.cats)
result = df.vals.fillna(np.nan)
tm.assert_series_equal(result, df.vals)
idx = DatetimeIndex(
["2011-01-01 09:00", "2016-01-01 23:45", "2011-01-01 09:00", NaT, NaT]
)
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
idx = PeriodIndex(["2011-01", "2011-01", "2011-01", NaT, NaT], freq="M")
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
idx = TimedeltaIndex(["1 days", "2 days", "1 days", NaT, NaT])
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
def test_fillna_no_downcast(self, frame_or_series):
# GH#45603 preserve object dtype
obj = frame_or_series([1, 2, 3], dtype="object")
result = obj.fillna("")
tm.assert_equal(result, obj)
@pytest.mark.parametrize("columns", [["A", "A", "B"], ["A", "A"]])
def test_fillna_dictlike_value_duplicate_colnames(self, columns):
# GH#43476
df = DataFrame(np.nan, index=[0, 1], columns=columns)
with tm.assert_produces_warning(None):
result = df.fillna({"A": 0})
expected = df.copy()
expected["A"] = 0.0
tm.assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.dtypes
expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(
1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5], dtype=object
)
tm.assert_frame_equal(result, expected)
# empty block
df = DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
result = df.fillna("nan")
expected = DataFrame("nan", dtype="object", index=range(3), columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("val", ["", 1, np.nan, 1.0])
def test_fillna_dtype_conversion_equiv_replace(self, val):
df = DataFrame({"A": [1, np.nan], "B": [1.0, 2.0]})
expected = df.replace(np.nan, val)
result = df.fillna(val)
tm.assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH#7095
df = DataFrame(
{
"A": [-1, -2, np.nan],
"B": date_range("20130101", periods=3),
"C": ["foo", "bar", None],
"D": ["foo2", "bar2", None],
},
index=date_range("20130110", periods=3),
)
result = df.fillna("?")
expected = DataFrame(
{
"A": [-1, -2, "?"],
"B": date_range("20130101", periods=3),
"C": ["foo", "bar", "?"],
"D": ["foo2", "bar2", "?"],
},
index=date_range("20130110", periods=3),
)
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"A": [-1, -2, np.nan],
"B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), NaT],
"C": ["foo", "bar", None],
"D": ["foo2", "bar2", None],
},
index=date_range("20130110", periods=3),
)
result = df.fillna("?")
expected = DataFrame(
{
"A": [-1, -2, "?"],
"B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), "?"],
"C": ["foo", "bar", "?"],
"D": ["foo2", "bar2", "?"],
},
index=date_range("20130110", periods=3),
)
tm.assert_frame_equal(result, expected)
def test_ffill(self, datetime_frame):
datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
alt = datetime_frame.ffill()
tm.assert_frame_equal(datetime_frame.ffill(), alt)
def test_bfill(self, datetime_frame):
datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
alt = datetime_frame.bfill()
tm.assert_frame_equal(datetime_frame.bfill(), alt)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=index)
result = df[:2].reindex(index, method="pad", limit=5)
expected = df[:2].reindex(index).ffill()
expected.iloc[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method="backfill", limit=5)
expected = df[-2:].reindex(index).bfill()
expected.iloc[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=index)
result = df[:2].reindex(index)
result = result.ffill(limit=5)
expected = df[:2].reindex(index).ffill()
expected.iloc[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.bfill(limit=5)
expected = df[-2:].reindex(index).bfill()
expected.iloc[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)).astype(int))
# it works!
df.fillna(np.nan)
@pytest.mark.parametrize("type", [int, float])
def test_fillna_positive_limit(self, type):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))).astype(type)
msg = "Limit must be greater than 0"
with pytest.raises(ValueError, match=msg):
df.fillna(0, limit=-5)
@pytest.mark.parametrize("type", [int, float])
def test_fillna_integer_limit(self, type):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))).astype(type)
msg = "Limit must be an integer"
with pytest.raises(ValueError, match=msg):
df.fillna(0, limit=0.5)
def test_fillna_inplace(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
df.loc[:4, 1] = np.nan
df.loc[-4:, 3] = np.nan
expected = df.fillna(value=0)
assert expected is not df
result = df.fillna(value=0, inplace=True)
assert result is df
tm.assert_frame_equal(df, expected)
result = df.fillna(value={0: 0}, inplace=True)
assert result is df
df.loc[:4, 1] = np.nan
df.loc[-4:, 3] = np.nan
expected = df.ffill()
assert expected is not df
df.ffill(inplace=True)
tm.assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame(
{
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
)
result = df.fillna({"a": 0, "b": 5})
expected = df.copy()
expected["a"] = expected["a"].fillna(0)
expected["b"] = expected["b"].fillna(5)
tm.assert_frame_equal(result, expected)
# it works
result = df.fillna({"a": 0, "b": 5, "d": 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
tm.assert_frame_equal(result, expected)
def test_fillna_dict_series_axis_1(self):
df = DataFrame(
{
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
)
result = df.fillna(df.max(axis=1), axis=1)
result = df.fillna(df.max(axis=1), axis=1, inplace=True)
assert result is df
expected = DataFrame(
{
"a": [1.0, 1.0, 2.0, 3.0, 4.0],
"b": [1.0, 2.0, 3.0, 3.0, 4.0],
"c": [1.0, 1.0, 2.0, 3.0, 4.0],
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, expected)
def test_fillna_dict_series_axis_1_mismatch_cols(self):
df = DataFrame(
{
"a": ["abc", "def", np.nan, "ghi", "jkl"],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
)
with pytest.raises(ValueError, match="All columns must have the same dtype"):
df.fillna(Series({"a": "abc", "b": "def", "c": "hij"}), axis=1)
def test_fillna_dict_series_axis_1_value_mismatch_with_cols(self):
df = DataFrame(
{
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
)
with pytest.raises(ValueError, match=".* not a suitable type to fill into .*"):
df.fillna(Series({"a": "abc", "b": "def", "c": "hij"}), axis=1)
def test_fillna_dataframe(self):
# GH#8377
df = DataFrame(
{
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
},
index=list("VWXYZ"),
)
# df2 may have different index and columns
df2 = DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame(
{
"a": [np.nan, 1, 2, np.nan, 40],
"b": [1, 2, 3, np.nan, 90],
"c": [np.nan, 1, 2, 3, 4],
},
index=list("VWXYZ"),
)
tm.assert_frame_equal(result, expected)
def test_fillna_columns(self):
arr = np.random.default_rng(2).standard_normal((10, 10))
arr[:, ::2] = np.nan
df = DataFrame(arr)
result = df.ffill(axis=1)
expected = df.T.ffill().T
tm.assert_frame_equal(result, expected)
df.insert(6, "foo", 5)
result = df.ffill(axis=1)
expected = df.astype(float).ffill(axis=1)
tm.assert_frame_equal(result, expected)
def test_fillna_invalid_value(self, float_frame):
# list
msg = '"value" parameter must be a scalar or dict, but you passed a "{}"'
with pytest.raises(TypeError, match=msg.format("list")):
float_frame.fillna([1, 2])
# tuple
with pytest.raises(TypeError, match=msg.format("tuple")):
float_frame.fillna((1, 2))
# frame with series
msg = (
'"value" parameter must be a scalar, dict or Series, but you '
'passed a "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
float_frame.iloc[:, 0].fillna(float_frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.default_rng(2).random((20, 5))
df = DataFrame(index=range(20), columns=cols, data=data)
filled = df.ffill()
assert df.columns.tolist() == filled.columns.tolist()
def test_fill_empty(self, float_frame):
df = float_frame.reindex(columns=[])
result = df.fillna(value=0)
tm.assert_frame_equal(result, df)
def test_fillna_with_columns_and_limit(self):
# GH40989
df = DataFrame(
[
[np.nan, 2, np.nan, 0],
[3, 4, np.nan, 1],
[np.nan, np.nan, np.nan, 5],
[np.nan, 3, np.nan, 4],
],
columns=list("ABCD"),
)
result = df.fillna(axis=1, value=100, limit=1)
result2 = df.fillna(axis=1, value=100, limit=2)
expected = DataFrame(
{
"A": Series([100, 3, 100, 100], dtype="float64"),
"B": [2, 4, np.nan, 3],
"C": [np.nan, 100, np.nan, np.nan],
"D": Series([0, 1, 5, 4], dtype="float64"),
},
index=[0, 1, 2, 3],
)
expected2 = DataFrame(
{
"A": Series([100, 3, 100, 100], dtype="float64"),
"B": Series([2, 4, 100, 3], dtype="float64"),
"C": [100, 100, np.nan, 100],
"D": Series([0, 1, 5, 4], dtype="float64"),
},
index=[0, 1, 2, 3],
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected2)
def test_fillna_datetime_inplace(self):
# GH#48863
df = DataFrame(
{
"date1": to_datetime(["2018-05-30", None]),
"date2": to_datetime(["2018-09-30", None]),
}
)
expected = df.copy()
df.fillna(np.nan, inplace=True)
tm.assert_frame_equal(df, expected)
def test_fillna_inplace_with_columns_limit_and_value(self):
# GH40989
df = DataFrame(
[
[np.nan, 2, np.nan, 0],
[3, 4, np.nan, 1],
[np.nan, np.nan, np.nan, 5],
[np.nan, 3, np.nan, 4],
],
columns=list("ABCD"),
)
expected = df.fillna(axis=1, value=100, limit=1)
assert expected is not df
result = df.fillna(axis=1, value=100, limit=1, inplace=True)
assert result is df
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [-1, {"x": -1, "y": -1}])
def test_inplace_dict_update_view(self, val):
# GH#47188
df = DataFrame({"x": [np.nan, 2], "y": [np.nan, 2]})
df_orig = df.copy()
result_view = df[:]
df.fillna(val, inplace=True)
expected = DataFrame({"x": [-1, 2.0], "y": [-1.0, 2]})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(result_view, df_orig)
def test_single_block_df_with_horizontal_axis(self):
# GH 47713
df = DataFrame(
{
"col1": [5, 0, np.nan, 10, np.nan],
"col2": [7, np.nan, np.nan, 5, 3],
"col3": [12, np.nan, 1, 2, 0],
"col4": [np.nan, 1, 1, np.nan, 18],
}
)
result = df.fillna(50, limit=1, axis=1)
expected = DataFrame(
[
[5.0, 7.0, 12.0, 50.0],
[0.0, 50.0, np.nan, 1.0],
[50.0, np.nan, 1.0, 1.0],
[10.0, 5.0, 2.0, 50.0],
[50.0, 3.0, 0.0, 18.0],
],
columns=["col1", "col2", "col3", "col4"],
)
tm.assert_frame_equal(result, expected)
def test_fillna_with_multi_index_frame(self):
# GH 47649
pdf = DataFrame(
{
("x", "a"): [np.nan, 2.0, 3.0],
("x", "b"): [1.0, 2.0, np.nan],
("y", "c"): [1.0, 2.0, np.nan],
}
)
expected = DataFrame(
{
("x", "a"): [-1.0, 2.0, 3.0],
("x", "b"): [1.0, 2.0, -1.0],
("y", "c"): [1.0, 2.0, np.nan],
}
)
tm.assert_frame_equal(pdf.fillna({"x": -1}), expected)
tm.assert_frame_equal(pdf.fillna({"x": -1, ("x", "b"): -2}), expected)
expected = DataFrame(
{
("x", "a"): [-1.0, 2.0, 3.0],
("x", "b"): [1.0, 2.0, -2.0],
("y", "c"): [1.0, 2.0, np.nan],
}
)
tm.assert_frame_equal(pdf.fillna({("x", "b"): -2, "x": -1}), expected)
def test_fillna_nonconsolidated_frame():
# https://github.com/pandas-dev/pandas/issues/36495
df = DataFrame(
[
[1, 1, 1, 1.0],
[2, 2, 2, 2.0],
[3, 3, 3, 3.0],
],
columns=["i1", "i2", "i3", "f1"],
)
df_nonconsol = df.pivot(index="i1", columns="i2")
result = df_nonconsol.fillna(0)
assert result.isna().sum().sum() == 0
def test_fillna_nones_inplace():
# GH 48480
df = DataFrame(
[[None, None], [None, None]],
columns=["A", "B"],
)
result = df.fillna(value={"A": 1, "B": 2}, inplace=True)
assert result is df
expected = DataFrame([[1, 2], [1, 2]], columns=["A", "B"], dtype=object)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"data, expected_data, method, kwargs",
(
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan],
"ffill",
{"limit_area": "inside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan],
"ffill",
{"limit_area": "inside", "limit": 1},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0],
"ffill",
{"limit_area": "outside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan],
"ffill",
{"limit_area": "outside", "limit": 1},
),
(
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
"ffill",
{"limit_area": "outside", "limit": 1},
),
(
range(5),
range(5),
"ffill",
{"limit_area": "outside", "limit": 1},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan],
"bfill",
{"limit_area": "inside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan],
"bfill",
{"limit_area": "inside", "limit": 1},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],
"bfill",
{"limit_area": "outside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
[np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],
"bfill",
{"limit_area": "outside", "limit": 1},
),
),
)
def test_ffill_bfill_limit_area(data, expected_data, method, kwargs):
# GH#56492
df = DataFrame(data)
expected = DataFrame(expected_data)
result = getattr(df, method)(**kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_frame", [True, False])
@pytest.mark.parametrize("dtype", ["float", "object"])
def test_fillna_with_none_object(test_frame, dtype):
# GH#57723
obj = Series([1, np.nan, 3], dtype=dtype)
if test_frame:
obj = obj.to_frame()
result = obj.fillna(value=None)
expected = Series([1, None, 3], dtype=dtype)
if test_frame:
expected = expected.to_frame()
tm.assert_equal(result, expected)
def test_fillna_out_of_bounds_datetime():
# GH#61208
df = DataFrame(
{
"datetime": date_range("1/1/2011", periods=3, freq="h", unit="ns"),
"value": [1, 2, 3],
}
)
df.iloc[0, 0] = None
msg = "Cannot cast 0001-01-01 00:00:00 to unit='ns' without overflow"
with pytest.raises(OutOfBoundsDatetime, match=msg):
df.fillna(Timestamp("0001-01-01"))
| TestFillNA |
python | TheAlgorithms__Python | data_structures/binary_tree/serialize_deserialize_binary_tree.py | {
"start": 120,
"end": 3561
} | class ____:
"""
A binary tree node has a value, left child, and right child.
Props:
value: The value of the node.
left: The left child of the node.
right: The right child of the node.
"""
value: int = 0
left: TreeNode | None = None
right: TreeNode | None = None
def __post_init__(self):
if not isinstance(self.value, int):
raise TypeError("Value must be an integer.")
def __iter__(self) -> Iterator[TreeNode]:
"""
Iterate through the tree in preorder.
Returns:
An iterator of the tree nodes.
>>> list(TreeNode(1))
[1,null,null]
>>> tuple(TreeNode(1, TreeNode(2), TreeNode(3)))
(1,2,null,null,3,null,null, 2,null,null, 3,null,null)
"""
yield self
yield from self.left or ()
yield from self.right or ()
def __len__(self) -> int:
"""
Count the number of nodes in the tree.
Returns:
The number of nodes in the tree.
>>> len(TreeNode(1))
1
>>> len(TreeNode(1, TreeNode(2), TreeNode(3)))
3
"""
return sum(1 for _ in self)
def __repr__(self) -> str:
"""
Represent the tree as a string.
Returns:
A string representation of the tree.
>>> repr(TreeNode(1))
'1,null,null'
>>> repr(TreeNode(1, TreeNode(2), TreeNode(3)))
'1,2,null,null,3,null,null'
>>> repr(TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4), TreeNode(5))))
'1,2,null,null,3,4,null,null,5,null,null'
"""
return f"{self.value},{self.left!r},{self.right!r}".replace("None", "null")
@classmethod
def five_tree(cls) -> TreeNode:
"""
>>> repr(TreeNode.five_tree())
'1,2,null,null,3,4,null,null,5,null,null'
"""
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.right.left = TreeNode(4)
root.right.right = TreeNode(5)
return root
def deserialize(data: str) -> TreeNode | None:
"""
Deserialize a string to a binary tree.
Args:
data(str): The serialized string.
Returns:
The root of the binary tree.
>>> root = TreeNode.five_tree()
>>> serialzed_data = repr(root)
>>> deserialized = deserialize(serialzed_data)
>>> root == deserialized
True
>>> root is deserialized # two separate trees
False
>>> root.right.right.value = 6
>>> root == deserialized
False
>>> serialzed_data = repr(root)
>>> deserialized = deserialize(serialzed_data)
>>> root == deserialized
True
>>> deserialize("")
Traceback (most recent call last):
...
ValueError: Data cannot be empty.
"""
if not data:
raise ValueError("Data cannot be empty.")
# Split the serialized string by a comma to get node values
nodes = data.split(",")
def build_tree() -> TreeNode | None:
# Get the next value from the list
value = nodes.pop(0)
if value == "null":
return None
node = TreeNode(int(value))
node.left = build_tree() # Recursively build left subtree
node.right = build_tree() # Recursively build right subtree
return node
return build_tree()
if __name__ == "__main__":
import doctest
doctest.testmod()
| TreeNode |
python | pytorch__pytorch | test/distributed/_composable/test_checkpoint.py | {
"start": 1702,
"end": 1985
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.p = nn.Parameter(torch.randn(100, 100))
def forward(self, x):
y = torch.matmul(self.p, torch.randn(100, 100, device=self.p.device))
return torch.matmul(x, y)
| RandomModel |
python | dagster-io__dagster | python_modules/libraries/dagster-openai/dagster_openai/resources.py | {
"start": 649,
"end": 5469
} | class ____(Enum):
"""Supported endpoint classes of the OpenAI API v1."""
COMPLETIONS = "completions"
CHAT = "chat"
EMBEDDINGS = "embeddings"
API_ENDPOINT_CLASSES_TO_ENDPOINT_METHODS_MAPPING = {
ApiEndpointClassesEnum.COMPLETIONS: [["create"]],
ApiEndpointClassesEnum.CHAT: [["completions", "create"]],
ApiEndpointClassesEnum.EMBEDDINGS: [["create"]],
}
context_to_counters = WeakKeyDictionary()
def _add_to_asset_metadata(
context: AssetExecutionContext,
usage_metadata: dict[str, int],
output_name: Optional[str],
):
if context not in context_to_counters:
context_to_counters[context] = defaultdict(lambda: 0)
counters = context_to_counters[context]
for metadata_key, delta in usage_metadata.items():
counters[metadata_key] += delta
context.add_output_metadata(
metadata=dict(counters),
output_name=output_name,
)
@public
def with_usage_metadata(
context: Union[AssetExecutionContext, OpExecutionContext], output_name: Optional[str], func
):
"""This wrapper can be used on any endpoint of the
`openai library <https://github.com/openai/openai-python>`_
to log the OpenAI API usage metadata in the asset metadata.
Examples:
.. code-block:: python
from dagster import (
AssetExecutionContext,
AssetKey,
AssetSelection,
AssetSpec,
Definitions,
EnvVar,
MaterializeResult,
asset,
define_asset_job,
multi_asset,
)
from dagster_openai import OpenAIResource, with_usage_metadata
@asset(compute_kind="OpenAI")
def openai_asset(context: AssetExecutionContext, openai: OpenAIResource):
with openai.get_client(context) as client:
client.fine_tuning.jobs.create = with_usage_metadata(
context=context, output_name="some_output_name", func=client.fine_tuning.jobs.create
)
client.fine_tuning.jobs.create(model="gpt-3.5-turbo", training_file="some_training_file")
openai_asset_job = define_asset_job(name="openai_asset_job", selection="openai_asset")
@multi_asset(
specs=[
AssetSpec("my_asset1"),
AssetSpec("my_asset2"),
]
)
def openai_multi_asset(context: AssetExecutionContext, openai: OpenAIResource):
with openai.get_client(context, asset_key=AssetKey("my_asset1")) as client:
client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}]
)
# The materialization of `my_asset1` will include both OpenAI usage metadata
# and the metadata added when calling `MaterializeResult`.
return (
MaterializeResult(asset_key="my_asset1", metadata={"foo": "bar"}),
MaterializeResult(asset_key="my_asset2", metadata={"baz": "qux"}),
)
openai_multi_asset_job = define_asset_job(
name="openai_multi_asset_job", selection=AssetSelection.assets(openai_multi_asset)
)
Definitions(
assets=[openai_asset, openai_multi_asset],
jobs=[openai_asset_job, openai_multi_asset_job],
resources={
"openai": OpenAIResource(api_key=EnvVar("OPENAI_API_KEY")),
},
)
"""
if not isinstance(context, AssetExecutionContext):
raise DagsterInvariantViolationError(
"The `with_usage_metadata` can only be used when context is of type `AssetExecutionContext`."
)
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
calls_key = f"openai.{response.model}.calls"
total_tokens_key = f"openai.{response.model}.total_tokens"
prompt_tokens_key = f"openai.{response.model}.prompt_tokens"
completion_tokens_key = f"openai.{response.model}.completion_tokens"
usage = response.usage
usage_metadata = {
calls_key: 1,
total_tokens_key: usage.total_tokens,
prompt_tokens_key: usage.prompt_tokens,
}
if hasattr(usage, "completion_tokens"):
usage_metadata[completion_tokens_key] = usage.completion_tokens
_add_to_asset_metadata(
context=context,
usage_metadata=usage_metadata,
output_name=output_name,
)
return response
return wrapper
@public
| ApiEndpointClassesEnum |
python | getsentry__sentry | src/sentry/rules/history/endpoints/project_rule_group_history.py | {
"start": 1101,
"end": 1252
} | class ____(TypedDict):
group: BaseGroupSerializerResponse
count: int
lastTriggered: datetime
eventId: str | None
| RuleGroupHistoryResponse |
python | coleifer__peewee | tests/models.py | {
"start": 168532,
"end": 169269
} | class ____(ModelTestCase):
database = get_in_memory_db()
requires = [User]
def test_get_with_second_database(self):
User.create(username='huey')
query = User.select().where(User.username == 'huey')
self.assertEqual(query.get().username, 'huey')
alt_db = get_in_memory_db()
with User.bind_ctx(alt_db):
User.create_table()
self.assertRaises(User.DoesNotExist, query.get, alt_db)
with User.bind_ctx(alt_db):
User.create(username='zaizee')
query = User.select().where(User.username == 'zaizee')
self.assertRaises(User.DoesNotExist, query.get)
self.assertEqual(query.get(alt_db).username, 'zaizee')
| TestGetWithSecondDatabase |
python | getsentry__sentry-python | sentry_sdk/integrations/grpc/client.py | {
"start": 566,
"end": 3373
} | class ____(
grpc.UnaryUnaryClientInterceptor, # type: ignore
grpc.UnaryStreamClientInterceptor, # type: ignore
):
_is_intercepted = False
def intercept_unary_unary(self, continuation, client_call_details, request):
# type: (ClientInterceptor, Callable[[ClientCallDetails, Message], _UnaryOutcome], ClientCallDetails, Message) -> _UnaryOutcome
method = client_call_details.method
with sentry_sdk.start_span(
op=OP.GRPC_CLIENT,
name="unary unary call to %s" % method,
origin=SPAN_ORIGIN,
) as span:
span.set_data("type", "unary unary")
span.set_data("method", method)
client_call_details = self._update_client_call_details_metadata_from_scope(
client_call_details
)
response = continuation(client_call_details, request)
span.set_data("code", response.code().name)
return response
def intercept_unary_stream(self, continuation, client_call_details, request):
# type: (ClientInterceptor, Callable[[ClientCallDetails, Message], Union[Iterable[Any], UnaryStreamCall]], ClientCallDetails, Message) -> Union[Iterator[Message], Call]
method = client_call_details.method
with sentry_sdk.start_span(
op=OP.GRPC_CLIENT,
name="unary stream call to %s" % method,
origin=SPAN_ORIGIN,
) as span:
span.set_data("type", "unary stream")
span.set_data("method", method)
client_call_details = self._update_client_call_details_metadata_from_scope(
client_call_details
)
response = continuation(client_call_details, request) # type: UnaryStreamCall
# Setting code on unary-stream leads to execution getting stuck
# span.set_data("code", response.code().name)
return response
@staticmethod
def _update_client_call_details_metadata_from_scope(client_call_details):
# type: (ClientCallDetails) -> ClientCallDetails
metadata = (
list(client_call_details.metadata) if client_call_details.metadata else []
)
for (
key,
value,
) in sentry_sdk.get_current_scope().iter_trace_propagation_headers():
metadata.append((key, value))
client_call_details = grpc._interceptor._ClientCallDetails(
method=client_call_details.method,
timeout=client_call_details.timeout,
metadata=metadata,
credentials=client_call_details.credentials,
wait_for_ready=client_call_details.wait_for_ready,
compression=client_call_details.compression,
)
return client_call_details
| ClientInterceptor |
python | python__mypy | mypyc/test/test_annotate.py | {
"start": 618,
"end": 2600
} | class ____(MypycDataSuite):
files = files
base_path = test_temp_dir
optional_out = True
def run_case(self, testcase: DataDrivenTestCase) -> None:
"""Perform a runtime checking transformation test case."""
options = infer_ir_build_options_from_test_name(testcase.name)
if options is None:
# Skipped test case
return
with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
expected_output = remove_comment_lines(testcase.output)
# Parse "# A: <message>" comments.
for i, line in enumerate(testcase.input):
if "# A:" in line:
msg = line.rpartition("# A:")[2].strip()
expected_output.append(f"main:{i + 1}: {msg}")
ir = None
try:
ir, tree, type_map, mapper = build_ir_for_single_file2(testcase.input, options)
except CompileError as e:
actual = e.messages
else:
annotations = generate_annotations("native.py", tree, ir, type_map, mapper)
actual = []
for line_num, line_anns in sorted(
annotations.annotations.items(), key=lambda it: it[0]
):
anns = get_max_prio(line_anns)
str_anns = [a.message for a in anns]
s = " ".join(str_anns)
actual.append(f"main:{line_num}: {s}")
try:
assert_test_output(testcase, actual, "Invalid source code output", expected_output)
except BaseException:
if ir:
print("Generated IR:\n")
for fn in ir.functions:
if fn.name == "__top_level__":
continue
for s in format_func(fn):
print(s)
raise
| TestReport |
python | django__django | tests/reverse_lookup/tests.py | {
"start": 121,
"end": 1744
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?", creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?", creator=jim
)
Choice.objects.create(
poll=first_poll, related_poll=second_poll, name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(poll__question__exact="What's the first question?")
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(poll__question__exact="What's the second question?")
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
msg = (
"Cannot resolve keyword 'choice' into field. Choices are: "
"creator, creator_id, id, poll_choice, question, related_choice"
)
with self.assertRaisesMessage(FieldError, msg):
Poll.objects.get(choice__name__exact="This is the answer")
| ReverseLookupTests |
python | rapidsai__cudf | docs/cudf/source/_ext/PandasCompat.py | {
"start": 951,
"end": 1054
} | class ____(Directive):
def run(self):
return [PandasCompatList("")]
| PandasCompatListDirective |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/hardsigmoid_test.py | {
"start": 693,
"end": 1202
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {"input_one": torch.rand(N, C, H, W, device=device)}
self.op_func = op_func()
def forward(self, input_one):
return self.op_func(input_one)
op_bench.generate_pt_tests_from_op_list(
hardsigmoid_ops_list,
hardsigmoid_configs_short + hardsigmoid_configs_long,
HardsigmoidBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| HardsigmoidBenchmark |
python | skorch-dev__skorch | examples/image-classifier-finetuning/train.py | {
"start": 1488,
"end": 2313
} | class ____(BaseEstimator, TransformerMixin):
"""Image feature extractor
Parameters
----------
model_name : str (default='google/vit-base-patch32-224-in21k')
Name of the feature extractor on Hugging Face Hub.
device : str (default='cuda')
Computation device, typically 'cuda' or 'cpu'.
"""
def __init__(
self,
model_name='google/vit-base-patch32-224-in21k',
device='cuda',
):
self.model_name = model_name
self.device = device
def fit(self, X, y=None, **fit_params):
self.extractor_ = ViTFeatureExtractor.from_pretrained(
self.model_name, device=self.device,
)
return self
def transform(self, X):
return self.extractor_(X, return_tensors='pt')['pixel_values']
| FeatureExtractor |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_condition_evaluations.py | {
"start": 1263,
"end": 3159
} | class ____(graphene.ObjectType):
uniqueId = graphene.NonNull(graphene.String)
description = graphene.NonNull(graphene.String)
entityKey = graphene.NonNull(GrapheneEntityKey)
startTimestamp = graphene.Field(graphene.Float)
endTimestamp = graphene.Field(graphene.Float)
metadataEntries = non_null_list(GrapheneMetadataEntry)
status = graphene.NonNull(GrapheneAssetConditionEvaluationStatus)
childUniqueIds = non_null_list(graphene.String)
class Meta:
name = "UnpartitionedAssetConditionEvaluationNode"
def __init__(self, evaluation: AutomationConditionEvaluation):
self._evaluation = evaluation
if evaluation.true_subset.size > 0:
status = AssetConditionEvaluationStatus.TRUE
elif isinstance(evaluation.candidate_subset, SerializableEntitySubset) and (
evaluation.candidate_subset.size > 0
):
status = AssetConditionEvaluationStatus.FALSE
else:
status = AssetConditionEvaluationStatus.SKIPPED
super().__init__(
uniqueId=evaluation.condition_snapshot.unique_id,
description=evaluation.condition_snapshot.description,
startTimestamp=evaluation.start_timestamp,
endTimestamp=evaluation.end_timestamp,
status=status,
childUniqueIds=[
child.condition_snapshot.unique_id for child in evaluation.child_evaluations
],
entityKey=GrapheneEntityKey.from_entity_key(evaluation.key),
)
def resolve_metadataEntries(
self, graphene_info: ResolveInfo
) -> Sequence[GrapheneMetadataEntry]:
metadata = next(
(subset.metadata for subset in self._evaluation.subsets_with_metadata),
{},
)
return list(iterate_metadata_entries(metadata))
| GrapheneUnpartitionedAssetConditionEvaluationNode |
python | pypa__pip | src/pip/_internal/resolution/resolvelib/candidates.py | {
"start": 9178,
"end": 11387
} | class ____(_InstallRequirementBackedCandidate):
is_editable = False
def __init__(
self,
link: Link,
template: InstallRequirement,
factory: Factory,
name: NormalizedName | None = None,
version: Version | None = None,
) -> None:
source_link = link
cache_entry = factory.get_wheel_cache_entry(source_link, name)
if cache_entry is not None:
logger.debug("Using cached wheel link: %s", cache_entry.link)
link = cache_entry.link
ireq = make_install_req_from_link(link, template)
assert ireq.link == link
if ireq.link.is_wheel and not ireq.link.is_file:
wheel = Wheel(ireq.link.filename)
wheel_name = wheel.name
assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel"
# Version may not be present for PEP 508 direct URLs
if version is not None:
wheel_version = Version(wheel.version)
assert (
version == wheel_version
), f"{version!r} != {wheel_version!r} for wheel {name}"
if cache_entry is not None:
assert ireq.link.is_wheel
assert ireq.link.is_file
if cache_entry.persistent and template.link is template.original_link:
ireq.cached_wheel_source_link = source_link
if cache_entry.origin is not None:
ireq.download_info = cache_entry.origin
else:
# Legacy cache entry that does not have origin.json.
# download_info may miss the archive_info.hashes field.
ireq.download_info = direct_url_from_link(
source_link, link_is_in_wheel_cache=cache_entry.persistent
)
super().__init__(
link=link,
source_link=source_link,
ireq=ireq,
factory=factory,
name=name,
version=version,
)
def _prepare_distribution(self) -> BaseDistribution:
preparer = self._factory.preparer
return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)
| LinkCandidate |
python | python__mypy | mypy/test/testconstraints.py | {
"start": 260,
"end": 5267
} | class ____(Suite):
def setUp(self) -> None:
self.fx = TypeFixture()
def test_no_type_variables(self) -> None:
assert not infer_constraints(self.fx.o, self.fx.o, SUBTYPE_OF)
def test_basic_type_variable(self) -> None:
fx = self.fx
for direction in [SUBTYPE_OF, SUPERTYPE_OF]:
assert infer_constraints(fx.gt, fx.ga, direction) == [
Constraint(type_var=fx.t, op=direction, target=fx.a)
]
def test_basic_type_var_tuple_subtype(self) -> None:
fx = self.fx
assert infer_constraints(
Instance(fx.gvi, [UnpackType(fx.ts)]), Instance(fx.gvi, [fx.a, fx.b]), SUBTYPE_OF
) == [
Constraint(type_var=fx.ts, op=SUBTYPE_OF, target=TupleType([fx.a, fx.b], fx.std_tuple))
]
def test_basic_type_var_tuple(self) -> None:
fx = self.fx
assert set(
infer_constraints(
Instance(fx.gvi, [UnpackType(fx.ts)]), Instance(fx.gvi, [fx.a, fx.b]), SUPERTYPE_OF
)
) == {
Constraint(
type_var=fx.ts, op=SUPERTYPE_OF, target=TupleType([fx.a, fx.b], fx.std_tuple)
),
Constraint(
type_var=fx.ts, op=SUBTYPE_OF, target=TupleType([fx.a, fx.b], fx.std_tuple)
),
}
def test_type_var_tuple_with_prefix_and_suffix(self) -> None:
fx = self.fx
assert set(
infer_constraints(
Instance(fx.gv2i, [fx.t, UnpackType(fx.ts), fx.s]),
Instance(fx.gv2i, [fx.a, fx.b, fx.c, fx.d]),
SUPERTYPE_OF,
)
) == {
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
Constraint(
type_var=fx.ts, op=SUPERTYPE_OF, target=TupleType([fx.b, fx.c], fx.std_tuple)
),
Constraint(
type_var=fx.ts, op=SUBTYPE_OF, target=TupleType([fx.b, fx.c], fx.std_tuple)
),
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.d),
}
def test_unpack_homogeneous_tuple(self) -> None:
fx = self.fx
assert set(
infer_constraints(
Instance(fx.gvi, [UnpackType(Instance(fx.std_tuplei, [fx.t]))]),
Instance(fx.gvi, [fx.a, fx.b]),
SUPERTYPE_OF,
)
) == {
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
Constraint(type_var=fx.t, op=SUBTYPE_OF, target=fx.a),
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.b),
Constraint(type_var=fx.t, op=SUBTYPE_OF, target=fx.b),
}
def test_unpack_homogeneous_tuple_with_prefix_and_suffix(self) -> None:
fx = self.fx
assert set(
infer_constraints(
Instance(fx.gv2i, [fx.t, UnpackType(Instance(fx.std_tuplei, [fx.s])), fx.u]),
Instance(fx.gv2i, [fx.a, fx.b, fx.c, fx.d]),
SUPERTYPE_OF,
)
) == {
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.b),
Constraint(type_var=fx.s, op=SUBTYPE_OF, target=fx.b),
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.c),
Constraint(type_var=fx.s, op=SUBTYPE_OF, target=fx.c),
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.d),
}
def test_unpack_with_prefix_and_suffix(self) -> None:
fx = self.fx
assert set(
infer_constraints(
Instance(fx.gv2i, [fx.u, fx.t, fx.s, fx.u]),
Instance(fx.gv2i, [fx.a, fx.b, fx.c, fx.d]),
SUPERTYPE_OF,
)
) == {
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.a),
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.b),
Constraint(type_var=fx.t, op=SUBTYPE_OF, target=fx.b),
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.c),
Constraint(type_var=fx.s, op=SUBTYPE_OF, target=fx.c),
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.d),
}
def test_unpack_tuple_length_non_match(self) -> None:
fx = self.fx
assert set(
infer_constraints(
Instance(fx.gv2i, [fx.u, fx.t, fx.s, fx.u]),
Instance(fx.gv2i, [fx.a, fx.b, fx.d]),
SUPERTYPE_OF,
)
# We still get constraints on the prefix/suffix in this case.
) == {
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.a),
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.d),
}
def test_var_length_tuple_with_fixed_length_tuple(self) -> None:
fx = self.fx
assert not infer_constraints(
TupleType([fx.t, fx.s], fallback=Instance(fx.std_tuplei, [fx.o])),
Instance(fx.std_tuplei, [fx.a]),
SUPERTYPE_OF,
)
| ConstraintsSuite |
python | sqlalchemy__sqlalchemy | test/sql/test_defaults.py | {
"start": 31604,
"end": 33775
} | class ____(fixtures.TablesTest):
run_define_tables = "each"
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"aitable",
metadata,
Column(
"id",
Integer,
normalize_sequence(
config, Sequence("ai_id_seq", optional=True)
),
primary_key=True,
),
Column("int1", Integer),
Column("str1", String(20)),
)
def test_autoincrement(self, connection):
aitable = self.tables.aitable
ids = set()
rs = connection.execute(aitable.insert(), dict(int1=1))
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = connection.execute(aitable.insert(), dict(str1="row 2"))
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = connection.execute(aitable.insert(), dict(int1=3, str1="row 3"))
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = connection.execute(
aitable.insert().values({"int1": func.length("four")})
)
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
eq_(
ids,
set(
range(
testing.db.dialect.default_sequence_base,
testing.db.dialect.default_sequence_base + 4,
)
),
)
eq_(
list(connection.execute(aitable.select().order_by(aitable.c.id))),
[
(testing.db.dialect.default_sequence_base, 1, None),
(testing.db.dialect.default_sequence_base + 1, None, "row 2"),
(testing.db.dialect.default_sequence_base + 2, 3, "row 3"),
(testing.db.dialect.default_sequence_base + 3, 4, None),
],
)
| PKIncrementTest |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 56536,
"end": 57312
} | class ____(_PrintableStructure):
_fields_ = [
# Moved to the new busId location below
('busIdLegacy', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE),
('domain', c_uint),
('bus', c_uint),
('device', c_uint),
('pciDeviceId', c_uint),
# Added in 2.285
('pciSubSystemId', c_uint),
# New busId replaced the long deprecated and reserved fields with a
# field of the same size in 9.0
('busId', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE),
]
_fmt_ = {
'domain' : "0x%08X",
'bus' : "0x%02X",
'device' : "0x%02X",
'pciDeviceId' : "0x%08X",
'pciSubSystemId' : "0x%08X",
}
| nvmlPciInfo_t |
python | pikepdf__pikepdf | tests/test_pdf.py | {
"start": 5812,
"end": 13116
} | class ____:
def test_memory(self, resources):
data = (resources / 'pal-1bit-trivial.pdf').read_bytes()
with pytest.warns(UserWarning, match="bytes-like object containing a PDF"):
with pytest.raises(Exception):
Pdf.open(data)
def test_remove_unreferenced(resources, outdir):
in_ = resources / 'sandwich.pdf'
out1 = outdir / 'out1.pdf'
out2 = outdir / 'out2.pdf'
with Pdf.open(in_) as pdf:
pdf.pages[0].Contents = Stream(pdf, b' ')
pdf.save(out1)
pdf.remove_unreferenced_resources()
pdf.save(out2)
assert out2.stat().st_size < out1.stat().st_size
def test_show_xref(trivial, caplog):
with caplog.at_level(logging.INFO):
trivial.show_xref_table()
assert '1/0' in caplog.records[0].message
def test_progress(trivial, outdir):
pdf = trivial
mock = Mock()
pdf.save(outdir / 'out.pdf', progress=mock)
mock.assert_called()
@pytest.mark.skipif(locale.getpreferredencoding() != 'UTF-8', reason="Unicode check")
@pytest.mark.skipif(os.name == 'nt', reason="Windows can be inconsistent")
def test_unicode_filename(resources, outdir):
target1 = outdir / '测试.pdf' # Chinese: test.pdf
target2 = outdir / '通过考试.pdf' # Chinese: pass the test.pdf
shutil.copy(fspath(resources / 'pal-1bit-trivial.pdf'), fspath(target1))
with Pdf.open(target1) as pdf:
pdf.save(target2)
assert target2.exists()
def test_min_and_force_version(trivial, outdir):
pdf = trivial
pdf.save(outdir / '1.7.pdf', min_version='1.7')
with Pdf.open(outdir / '1.7.pdf') as pdf17:
assert pdf17.pdf_version == '1.7'
pdf.save(outdir / '1.2.pdf', force_version='1.2')
with Pdf.open(outdir / '1.2.pdf') as pdf12:
assert pdf12.pdf_version == '1.2'
def test_normalize_linearize(trivial, outdir):
with pytest.raises(ValueError):
trivial.save(outdir / 'no.pdf', linearize=True, normalize_content=True)
def test_make_stream(trivial, outdir):
pdf = trivial
stream = pdf.make_stream(b'q Q')
pdf.pages[0].Contents = stream
pdf.save(outdir / 's.pdf')
def test_add_blank_page(trivial):
assert len(trivial.pages) == 1
invalid = [-1, 0, 2, 15000]
for n in invalid:
with pytest.raises(ValueError):
trivial.add_blank_page(page_size=(n, n))
trivial.add_blank_page()
assert len(trivial.pages) == 2
def test_object_stream_mode_generated(trivial, outdir):
trivial.save(
outdir / '1.pdf',
fix_metadata_version=True,
object_stream_mode=pikepdf.ObjectStreamMode.generate,
)
assert b'/ObjStm' in (outdir / '1.pdf').read_bytes()
trivial.save(
outdir / '2.pdf',
fix_metadata_version=False,
object_stream_mode=pikepdf.ObjectStreamMode.generate,
)
assert b'/ObjStm' in (outdir / '2.pdf').read_bytes()
def test_with_block(resources):
desc = ''
with pikepdf.open(resources / 'pal-1bit-trivial.pdf') as pdf:
desc = pdf.filename
assert pdf.filename != desc
def test_closed_anon_pdf():
pdf = pikepdf.new()
desc = pdf.filename
pdf.close()
assert pdf.filename != desc
def test_with_block_abuse(resources):
with pikepdf.open(resources / 'pal-1bit-trivial.pdf') as pdf:
im0 = pdf.pages[0].Resources.XObject['/Im0']
with pytest.raises(PdfError):
im0.read_bytes()
def test_allow_overwriting_input(resources, tmp_path):
orig_pdf_path = fspath(resources / 'pal-1bit-trivial.pdf')
tmp_pdf_path = fspath(tmp_path / 'pal-1bit-trivial.pdf')
shutil.copy(orig_pdf_path, tmp_pdf_path)
with pikepdf.open(tmp_pdf_path, allow_overwriting_input=True) as pdf:
with pdf.open_metadata() as meta:
meta['dc:title'] = 'New Title'
pdf.save('other.pdf', encryption=dict(owner="owner"))
pdf.save()
pdf.save(linearize=True)
with pikepdf.open(tmp_pdf_path) as pdf:
with pdf.open_metadata() as meta:
assert meta['dc:title'] == 'New Title'
with pikepdf.open(orig_pdf_path) as pdf:
with pdf.open_metadata() as meta:
assert 'dc:title' not in meta
def test_allow_overwriting_input_without_filename():
with pytest.raises(ValueError):
with pikepdf.open(BytesIO(), allow_overwriting_input=True):
pass
def test_allow_overwriting_input_from_pdf_new():
pdf = Pdf.new()
with pytest.raises(ValueError, match="allow_overwriting_input=True"):
pdf.save()
def test_check(resources):
with pikepdf.open(resources / 'content-stream-errors.pdf') as pdf:
problems = pdf.check_pdf_syntax()
assert len(problems) > 0
assert all(isinstance(prob, str) for prob in problems)
assert 'parse error while reading' in problems[0]
def test_repr(trivial):
assert repr(trivial).startswith('<')
def test_recompress(resources, outdir):
with pikepdf.open(resources / 'image-mono-inline.pdf') as pdf:
obj = pdf.get_object((7, 0))
assert isinstance(obj, pikepdf.Stream)
data = obj.read_bytes()
data_z1 = zlib.compress(data, level=0) # No compression but zlib wrapper
obj.write(data_z1, filter=pikepdf.Name.FlateDecode)
bigger = outdir / 'a.pdf'
smaller = outdir / 'b.pdf'
pdf.save(bigger, recompress_flate=False)
pdf.save(smaller, recompress_flate=True)
assert smaller.stat().st_size < bigger.stat().st_size
def test_invalid_flate_compression_level():
# We don't want to change the compression level because it's global state
# and will change subsequent test results, so just ping it with an invalid
# value to get partial code coverage.
with pytest.raises(ValueError):
pikepdf.settings.set_flate_compression_level(99)
def test_flate_compression_level():
# While this function affects global state, we can test it safely because
# setting the value to -1 restores the default.
try:
pikepdf.settings.set_flate_compression_level(0)
pikepdf.settings.set_flate_compression_level(9)
finally:
pikepdf.settings.set_flate_compression_level(-1)
def test_set_access_default_mmap():
initial = pikepdf._core.get_access_default_mmap()
try:
pikepdf._core.set_access_default_mmap(True)
finally:
pikepdf._core.set_access_default_mmap(initial)
def test_generate_appearance_streams(pdf_form):
assert Name.AP not in pdf_form.Root.AcroForm.Fields[0]
pdf_form.Root.AcroForm.NeedAppearances = True
pdf_form.generate_appearance_streams()
assert Name.AP in pdf_form.Root.AcroForm.Fields[0]
@pytest.mark.parametrize(
'mode, exc',
[('all', None), ('print', None), ('screen', None), ('', None), ('42', ValueError)],
)
def test_flatten_annotations_parameters(pdf_form, mode, exc):
if exc is not None:
error_ctx = pytest.raises(exc)
else:
error_ctx = nullcontext()
with error_ctx:
if mode is None:
pdf_form.flatten_annotations()
else:
pdf_form.flatten_annotations(mode)
def test_refcount_chaining(resources):
# Ensure we can chain without crashing when Pdf is not properly opened or
# assigned a name
Pdf.open(resources / 'pal-1bit-trivial.pdf').pages[0]
| TestMemory |
python | PyCQA__pylint | tests/functional/u/useless/useless_object_inheritance.py | {
"start": 427,
"end": 513
} | class ____(object, C, metaclass=abc.ABCMeta): # [useless-object-inheritance]
pass
| D |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/conversational_retrieval/base.py | {
"start": 18609,
"end": 21128
} | class ____(BaseConversationalRetrievalChain):
"""Chain for chatting with a vector database."""
vectorstore: VectorStore = Field(alias="vectorstore")
top_k_docs_for_context: int = 4
search_kwargs: dict = Field(default_factory=dict)
@property
def _chain_type(self) -> str:
return "chat-vector-db"
@model_validator(mode="before")
@classmethod
def _raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`ChatVectorDBChain` is deprecated - "
"please use `from langchain_classic.chains import "
"ConversationalRetrievalChain`",
stacklevel=4,
)
return values
@override
def _get_docs(
self,
question: str,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
"""Get docs."""
vectordbkwargs = inputs.get("vectordbkwargs", {})
full_kwargs = {**self.search_kwargs, **vectordbkwargs}
return self.vectorstore.similarity_search(
question,
k=self.top_k_docs_for_context,
**full_kwargs,
)
async def _aget_docs(
self,
question: str,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
"""Get docs."""
msg = "ChatVectorDBChain does not support async"
raise NotImplementedError(msg)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
chain_type: str = "stuff",
combine_docs_chain_kwargs: dict | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Load chain from LLM."""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
callbacks=callbacks,
**combine_docs_chain_kwargs,
)
condense_question_chain = LLMChain(
llm=llm,
prompt=condense_question_prompt,
callbacks=callbacks,
)
return cls(
vectorstore=vectorstore,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
callbacks=callbacks,
**kwargs,
)
| ChatVectorDBChain |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 48530,
"end": 54571
} | class ____(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length scale
parameter :math:`l>0`, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
.. math::
k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)
where :math:`l` is the length scale of the kernel and
:math:`d(\\cdot,\\cdot)` is the Euclidean distance.
For advice on how to set the length scale parameter, see e.g. [1]_.
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
Advice on Covariance functions".
<https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
.. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866
>>> gpc.predict_proba(X[:2,:])
array([[0.8354, 0.03228, 0.1322],
[0.7906, 0.0652, 0.1441]])
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter(
"length_scale",
"numeric",
self.length_scale_bounds,
len(self.length_scale),
)
return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
length_scale**2
)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
)
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0]
)
| RBF |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 23427,
"end": 25525
} | class ____(ExtensionType):
oid = ExtensionOID.POLICY_CONSTRAINTS
def __init__(
self,
require_explicit_policy: int | None,
inhibit_policy_mapping: int | None,
) -> None:
if require_explicit_policy is not None and not isinstance(
require_explicit_policy, int
):
raise TypeError(
"require_explicit_policy must be a non-negative integer or "
"None"
)
if inhibit_policy_mapping is not None and not isinstance(
inhibit_policy_mapping, int
):
raise TypeError(
"inhibit_policy_mapping must be a non-negative integer or None"
)
if inhibit_policy_mapping is None and require_explicit_policy is None:
raise ValueError(
"At least one of require_explicit_policy and "
"inhibit_policy_mapping must not be None"
)
self._require_explicit_policy = require_explicit_policy
self._inhibit_policy_mapping = inhibit_policy_mapping
def __repr__(self) -> str:
return (
"<PolicyConstraints(require_explicit_policy={0.require_explicit"
"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
"mapping})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, PolicyConstraints):
return NotImplemented
return (
self.require_explicit_policy == other.require_explicit_policy
and self.inhibit_policy_mapping == other.inhibit_policy_mapping
)
def __hash__(self) -> int:
return hash(
(self.require_explicit_policy, self.inhibit_policy_mapping)
)
@property
def require_explicit_policy(self) -> int | None:
return self._require_explicit_policy
@property
def inhibit_policy_mapping(self) -> int | None:
return self._inhibit_policy_mapping
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
| PolicyConstraints |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 10526,
"end": 10651
} | class ____(sgqlc.types.Scalar):
"""
See source code for more info.
"""
__schema__ = graphql_schema
| GitObjectID |
python | apache__airflow | providers/opsgenie/tests/unit/opsgenie/operators/test_opsgenie.py | {
"start": 5761,
"end": 6781
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
@mock.patch("airflow.providers.opsgenie.operators.opsgenie.OpsgenieAlertHook")
def test_operator(self, mock_opsgenie_hook):
mock_opsgenie_hook.return_value = mock.Mock()
mock_opsgenie_hook.return_value.delete_alert.return_value = True
operator = OpsgenieDeleteAlertOperator(
task_id="opsgenie_test_delete_job",
dag=self.dag,
identifier="id",
identifier_type="id",
user="name",
source="source",
)
operator.execute(None)
mock_opsgenie_hook.assert_called_once_with("opsgenie_default")
mock_opsgenie_hook.return_value.delete_alert.assert_called_once_with(
identifier="id",
identifier_type="id",
source="source",
user="name",
)
| TestOpsgenieDeleteAlertOperator |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-dappier/tests/test_tools_dappier_ai_recommendations.py | {
"start": 1641,
"end": 10670
} | class ____:
def test_init_without_api_key_raises_value_error(self, monkeypatch, dappier_client):
monkeypatch.delenv("DAPPIER_API_KEY", raising=False)
with patch("dappier.Dappier", return_value=dappier_client):
with pytest.raises(ValueError) as excinfo:
DappierAIRecommendationsToolSpec()
assert "API key is required" in str(excinfo.value)
def test_get_sports_news_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_sports_news_recommendations(
"sports query",
similarity_top_k=5,
ref="example.com",
num_articles_ref=2,
search_algorithm="trending",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="sports query",
data_model_id="dm_01j0pb465keqmatq9k83dthx34",
similarity_top_k=5,
ref="example.com",
num_articles_ref=2,
search_algorithm="trending",
)
def test_get_sports_news_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_sports_news_recommendations("sports query")
assert result == "The API response was not successful."
def test_get_lifestyle_news_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_lifestyle_news_recommendations(
"lifestyle query",
similarity_top_k=8,
ref="lifestyle.com",
num_articles_ref=1,
search_algorithm="semantic",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="lifestyle query",
data_model_id="dm_01j0q82s4bfjmsqkhs3ywm3x6y",
similarity_top_k=8,
ref="lifestyle.com",
num_articles_ref=1,
search_algorithm="semantic",
)
def test_get_lifestyle_news_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_lifestyle_news_recommendations(
"lifestyle query"
)
assert result == "The API response was not successful."
def test_get_iheartdogs_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_iheartdogs_recommendations(
"dog query",
similarity_top_k=3,
ref="dogsite.com",
num_articles_ref=0,
search_algorithm="most_recent",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="dog query",
data_model_id="dm_01j1sz8t3qe6v9g8ad102kvmqn",
similarity_top_k=3,
ref="dogsite.com",
num_articles_ref=0,
search_algorithm="most_recent",
)
def test_get_iheartdogs_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_iheartdogs_recommendations("dog query")
assert result == "The API response was not successful."
def test_get_iheartcats_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_iheartcats_recommendations(
"cat query",
similarity_top_k=7,
ref="catsite.com",
num_articles_ref=1,
search_algorithm="most_recent_semantic",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="cat query",
data_model_id="dm_01j1sza0h7ekhaecys2p3y0vmj",
similarity_top_k=7,
ref="catsite.com",
num_articles_ref=1,
search_algorithm="most_recent_semantic",
)
def test_get_iheartcats_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_iheartcats_recommendations("cat query")
assert result == "The API response was not successful."
def test_get_greenmonster_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_greenmonster_recommendations(
"greenmonster query",
similarity_top_k=4,
ref="green.com",
num_articles_ref=2,
search_algorithm="trending",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="greenmonster query",
data_model_id="dm_01j5xy9w5sf49bm6b1prm80m27",
similarity_top_k=4,
ref="green.com",
num_articles_ref=2,
search_algorithm="trending",
)
def test_get_greenmonster_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_greenmonster_recommendations(
"greenmonster query"
)
assert result == "The API response was not successful."
def test_get_wishtv_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_wishtv_recommendations(
"tv query",
similarity_top_k=6,
ref="tv.com",
num_articles_ref=3,
search_algorithm="semantic",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="tv query",
data_model_id="dm_01jagy9nqaeer9hxx8z1sk1jx6",
similarity_top_k=6,
ref="tv.com",
num_articles_ref=3,
search_algorithm="semantic",
)
def test_get_wishtv_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_wishtv_recommendations("tv query")
assert result == "The API response was not successful."
def test_get_nine_and_ten_news_recommendations_success(
self, recommendations_tool, dappier_client, success_response
):
response, expected_output = success_response
dappier_client.get_ai_recommendations.return_value = response
result = recommendations_tool.get_nine_and_ten_news_recommendations(
"local news query",
similarity_top_k=9,
ref="localnews.com",
num_articles_ref=5,
search_algorithm="most_recent_semantic",
)
assert result == expected_output
dappier_client.get_ai_recommendations.assert_called_once_with(
query="local news query",
data_model_id="dm_01jhtt138wf1b9j8jwswye99y5",
similarity_top_k=9,
ref="localnews.com",
num_articles_ref=5,
search_algorithm="most_recent_semantic",
)
def test_get_nine_and_ten_news_recommendations_failure(
self, recommendations_tool, dappier_client, failure_response
):
dappier_client.get_ai_recommendations.return_value = failure_response
result = recommendations_tool.get_nine_and_ten_news_recommendations(
"local news query"
)
assert result == "The API response was not successful."
| TestDappierAIRecommendationsToolSpec |
python | tox-dev__tox | src/tox/execute/api.py | {
"start": 724,
"end": 1844
} | class ____:
def __init__(self, env: ToxEnv) -> None:
self._env = env
@classmethod
def register_conf(cls, env: ToxEnv) -> None:
env.conf.add_config(
keys=["suicide_timeout"],
desc="timeout to allow process to exit before sending SIGINT",
of_type=float,
default=0.0,
)
env.conf.add_config(
keys=["interrupt_timeout"],
desc="timeout before sending SIGTERM after SIGINT",
of_type=float,
default=0.3,
)
env.conf.add_config(
keys=["terminate_timeout"],
desc="timeout before sending SIGKILL after SIGTERM",
of_type=float,
default=0.2,
)
@property
def suicide_timeout(self) -> float:
return cast("float", self._env.conf["suicide_timeout"])
@property
def interrupt_timeout(self) -> float:
return cast("float", self._env.conf["interrupt_timeout"])
@property
def terminate_timeout(self) -> float:
return cast("float", self._env.conf["terminate_timeout"])
| ExecuteOptions |
python | doocs__leetcode | solution/3000-3099/3002.Maximum Size of a Set After Removals/Solution.py | {
"start": 0,
"end": 280
} | class ____:
def maximumSetSize(self, nums1: List[int], nums2: List[int]) -> int:
s1 = set(nums1)
s2 = set(nums2)
n = len(nums1)
a = min(len(s1 - s2), n // 2)
b = min(len(s2 - s1), n // 2)
return min(a + b + len(s1 & s2), n)
| Solution |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 29136,
"end": 35633
} | class ____:
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = [np.float32, np.float64, np.longdouble]
dtypes = real_dtypes + [np.complex64, np.complex128, np.clongdouble]
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = (f"{routine.__name__}({x.dtype}{x.shape!r}, {fftsize!r}, "
f"axis={axis!r}, overwrite_x={overwrite_x!r})")
if not should_overwrite:
assert_equal(x2, x, err_msg=f"spurious overwrite in {sig}")
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and fftsize <= shape[axis])
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.clongdouble, np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
                  overwrite_x):
    """Check overwrite behaviour of an n-d transform for every FFT shape
    derived from *shape* (halved, unchanged, doubled along each axis)."""
    np.random.seed(1234)
    if np.issubdtype(dtype, np.complexfloating):
        data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
    else:
        data = np.random.randn(*shape)
    data = data.astype(dtype)

    def fftshape_iter(shp):
        # Yield every combination of (n//2, n, 2*n) per transform axis.
        if len(shp) <= 0:
            yield ()
        else:
            for j in (shp[0]//2, shp[0], shp[0]*2):
                for rest in fftshape_iter(shp[1:]):
                    yield (j,) + rest

    def part_shape(shape, axes):
        # Shape restricted to the transformed axes (all axes when None).
        if axes is None:
            return shape
        else:
            return tuple(np.take(shape, axes))

    def should_overwrite(data, shape, axes):
        # An overwrite is legitimate only when requested, the transform
        # does not grow the array, and the dtype supports it.
        s = part_shape(data.shape, axes)
        return (overwrite_x and
                np.prod(shape) <= np.prod(s)
                and dtype in overwritable_dtypes)

    for fftshape in fftshape_iter(part_shape(shape, axes)):
        self._check(data, routine, fftshape, axes,
                    overwrite_x=overwrite_x,
                    should_overwrite=should_overwrite(data, fftshape, axes))
        if data.ndim > 1:
            # check fortran order
            self._check(data.T, routine, fftshape, axes,
                        overwrite_x=overwrite_x,
                        should_overwrite=should_overwrite(
                            data.T, fftshape, axes))
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
                                        ((16,), (0,)),
                                        ((16, 2), (0,)),
                                        ((2, 16), (1,)),
                                        ((8, 16), None),
                                        ((8, 16), (0, 1)),
                                        ((8, 16, 2), (0, 1)),
                                        ((8, 16, 2), (1, 2)),
                                        ((8, 16, 2), (0,)),
                                        ((8, 16, 2), (1,)),
                                        ((8, 16, 2), (2,)),
                                        ((8, 16, 2), None),
                                        ((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
    """fftn/ifftn must only overwrite their input for complex dtypes."""
    # As for the 1-D case, only complex inputs may be reused in place.
    overwritable = (np.clongdouble, np.complex128, np.complex64)
    self._check_nd_one(fftn, dtype, shape, axes, overwritable,
                       overwrite_x)
    self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
                       overwrite_x)
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
                                  rfft, irfft, rfftn, irfftn])
def test_invalid_norm(func):
    """An unrecognised ``norm`` keyword must raise a descriptive ValueError."""
    arr = np.arange(10, dtype=float)
    expected_msg = ('Invalid norm value \'o\', should be'
                    ' "backward", "ortho" or "forward"')
    with assert_raises(ValueError, match=expected_msg):
        func(arr, norm='o')
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
                                  irfft, irfftn, hfft, hfftn])
def test_swapped_byte_order_complex(func):
    """Complex transforms must be byte-order independent."""
    rng = np.random.RandomState(1234)
    values = rng.rand(10) + 1j * rng.rand(10)
    swapped = swap_byteorder(values)
    assert_allclose(func(swapped), func(values))
@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn])
def test_swapped_byte_order_real(func):
    """Real transforms must be byte-order independent."""
    rng = np.random.RandomState(1234)
    values = rng.rand(10)
    swapped = swap_byteorder(values)
    assert_allclose(func(swapped), func(values))
| TestOverwrite |
python | django__django | tests/model_fields/test_imagefield.py | {
"start": 12861,
"end": 13171
} | class ____(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField when assigning it a File instance
rather than an ImageFile instance.
"""
PersonModel = PersonDimensionsFirst
File = File
@skipIf(Image is None, "Pillow is required to test ImageField")
| ImageFieldUsingFileTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.