language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/auth/providers/dummy.py | {
"start": 1120,
"end": 2268
} | class ____(Provider):
name = "Dummy"
key = "dummy"
def get_auth_pipeline(self) -> Sequence[AuthView]:
return [AskEmail()]
def build_identity(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
return {
"id": MigratingIdentityId(
id=state.get("id", state["email"]), legacy_id=state.get("legacy_email")
),
"email": state["email"],
"email_verified": state["email_verified"],
"name": "Dummy",
}
def refresh_identity(self, auth_identity: AuthIdentity) -> None:
pass
def build_config(self, state: Mapping[str, Any]) -> dict[str, Any]:
return {}
dummy_provider_config = {
"idp": {
"entity_id": "https://example.com/saml/metadata/1234",
"x509cert": "foo_x509_cert",
"sso_url": "http://example.com/sso_url",
"slo_url": "http://example.com/slo_url",
},
"attribute_mapping": {
Attributes.IDENTIFIER: "user_id",
Attributes.USER_EMAIL: "email",
Attributes.FIRST_NAME: "first_name",
Attributes.LAST_NAME: "last_name",
},
}
| DummyProvider |
python | readthedocs__readthedocs.org | readthedocs/api/v2/admin.py | {
"start": 173,
"end": 318
} | class ____(APIKeyModelAdmin):
raw_id_fields = ["project"]
search_fields = [*APIKeyModelAdmin.search_fields, "project__slug"]
| BuildAPIKeyAdmin |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink45.py | {
"start": 315,
"end": 959
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink45.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"E9",
self.image_dir + "red.png",
{"url": r"external:\\Vboxsvr\share\foo bar.xlsx#'Some Sheet'!A1"},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/endpoints-frameworks-v2/iata/main.py | {
"start": 1272,
"end": 1480
} | class ____(messages.Message):
airports = messages.MessageField(Airport, 1, repeated=True)
# [END endpoints_iata_messages]
# [START endpoints_iata_api]
@endpoints.api(name="iata", version="v1")
| AirportList |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 404,
"end": 5411
} | class ____(NonStrictDataModel):
"""
:param label: Lucene format query (see lucene query syntax). Default search
field is label.keyword and default operator is AND, so searching for:
'Bus Stop' Blue
is equivalent to:
Label.keyword:'Bus Stop' AND label.keyword:'Blue'
:type label: str
:param count_range: Range of times ROI appears in the frame (min, max). -1 for
not applicable. Both integers must be larger than or equal to -1. 2nd integer
(max) must be either -1 or larger than or equal to the 1st integer (min)
:type count_range: Sequence[int]
:param conf_range: Range of ROI confidence level in the frame (min, max). -1
for not applicable Both min and max can be either -1 or positive. 2nd number
(max) must be either -1 or larger than or equal to the 1st number (min)
:type conf_range: Sequence[float]
:param must_not: If set then the label must not exist or lucene query must not
be true. The default value is false
:type must_not: bool
"""
_schema = {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min"
" and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger"
" than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n Both"
" integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or"
" larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default"
" operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus"
" Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The default"
" value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
}
def __init__(
self, label, count_range=None, conf_range=None, must_not=False, **kwargs
):
super(FilterLabelRule, self).__init__(**kwargs)
self.label = label
self.count_range = count_range
self.conf_range = conf_range
self.must_not = must_not
@schema_property("label")
def label(self):
return self._property_label
@label.setter
def label(self, value):
if value is None:
self._property_label = None
return
self.assert_isinstance(value, "label", six.string_types)
self._property_label = value
@schema_property("count_range")
def count_range(self):
return self._property_count_range
@count_range.setter
def count_range(self, value):
if value is None:
self._property_count_range = None
return
self.assert_isinstance(value, "count_range", (list, tuple))
value = [
int(v) if isinstance(v, float) and v.is_integer() else v for v in value
]
self.assert_isinstance(value, "count_range", six.integer_types, is_array=True)
self._property_count_range = value
@schema_property("conf_range")
def conf_range(self):
return self._property_conf_range
@conf_range.setter
def conf_range(self, value):
if value is None:
self._property_conf_range = None
return
self.assert_isinstance(value, "conf_range", (list, tuple))
self.assert_isinstance(
value, "conf_range", six.integer_types + (float,), is_array=True
)
self._property_conf_range = value
@schema_property("must_not")
def must_not(self):
return self._property_must_not
@must_not.setter
def must_not(self, value):
if value is None:
self._property_must_not = None
return
self.assert_isinstance(value, "must_not", (bool,))
self._property_must_not = value
| FilterLabelRule |
python | scikit-learn__scikit-learn | sklearn/neighbors/_base.py | {
"start": 26005,
"end": 38763
} | class ____:
"""Mixin for k-neighbors searches."""
def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):
"""Reduce a chunk of distances to the nearest neighbors.
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : ndarray of shape (n_samples_chunk, n_samples)
The distance matrix.
start : int
The index in X which the first row of dist corresponds to.
n_neighbors : int
Number of neighbors required for each sample.
return_distance : bool
Whether or not to return the distances.
Returns
-------
dist : array of shape (n_samples_chunk, n_neighbors)
Returned only if `return_distance=True`.
neigh : array of shape (n_samples_chunk, n_neighbors)
The neighbors indices.
"""
sample_range = np.arange(dist.shape[0])[:, None]
neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == "euclidean":
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
return result
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Find the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, default=None
Number of neighbors required for each sample. The default is the
value passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_queries, n_neighbors)
Array representing the lengths to points, only present if
return_distance=True.
neigh_ind : ndarray of shape (n_queries, n_neighbors)
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples)
NearestNeighbors(n_neighbors=1)
>>> print(neigh.kneighbors([[1., 1., 1.]]))
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False)
array([[1],
[2]]...)
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
elif n_neighbors <= 0:
raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors)
elif not isinstance(n_neighbors, numbers.Integral):
raise TypeError(
"n_neighbors does not take %s value, enter integer value"
% type(n_neighbors)
)
ensure_all_finite = "allow-nan" if get_tags(self).input_tags.allow_nan else True
query_is_train = X is None
if query_is_train:
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
else:
if self.metric == "precomputed":
X = _check_precomputed(X)
else:
X = validate_data(
self,
X,
ensure_all_finite=ensure_all_finite,
accept_sparse="csr",
reset=False,
order="C",
)
n_samples_fit = self.n_samples_fit_
if n_neighbors > n_samples_fit:
if query_is_train:
n_neighbors -= 1 # ok to modify inplace because an error is raised
inequality_str = "n_neighbors < n_samples_fit"
else:
inequality_str = "n_neighbors <= n_samples_fit"
raise ValueError(
f"Expected {inequality_str}, but "
f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, "
f"n_samples = {X.shape[0]}" # include n_samples for common tests
)
n_jobs = effective_n_jobs(self.n_jobs)
chunked_results = None
use_pairwise_distances_reductions = (
self._fit_method == "brute"
and ArgKmin.is_usable_for(
X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
)
)
if use_pairwise_distances_reductions:
results = ArgKmin.compute(
X=X,
Y=self._fit_X,
k=n_neighbors,
metric=self.effective_metric_,
metric_kwargs=self.effective_metric_params_,
strategy="auto",
return_distance=return_distance,
)
elif (
self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
):
results = _kneighbors_from_graph(
X, n_neighbors=n_neighbors, return_distance=return_distance
)
elif self._fit_method == "brute":
# Joblib-based backend, which is used when user-defined callable
# are passed for metric.
# This won't be used in the future once PairwiseDistancesReductions
# support:
# - DistanceMetrics which work on supposedly binary data
# - CSR-dense and dense-CSR case if 'euclidean' in metric.
reduce_func = partial(
self._kneighbors_reduce_func,
n_neighbors=n_neighbors,
return_distance=return_distance,
)
# for efficiency, use squared euclidean distances
if self.effective_metric_ == "euclidean":
kwds = {"squared": True}
else:
kwds = self.effective_metric_params_
chunked_results = list(
pairwise_distances_chunked(
X,
self._fit_X,
reduce_func=reduce_func,
metric=self.effective_metric_,
n_jobs=n_jobs,
**kwds,
)
)
elif self._fit_method in ["ball_tree", "kd_tree"]:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method
)
chunked_results = Parallel(n_jobs, prefer="threads")(
delayed(self._tree.query)(X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
else:
raise ValueError("internal: _fit_method not recognized")
if chunked_results is not None:
if return_distance:
neigh_dist, neigh_ind = zip(*chunked_results)
results = np.vstack(neigh_dist), np.vstack(neigh_ind)
else:
results = np.vstack(chunked_results)
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
neigh_dist, neigh_ind = results
else:
neigh_ind = results
n_queries, _ = X.shape
sample_range = np.arange(n_queries)[:, None]
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))
if return_distance:
neigh_dist = np.reshape(
neigh_dist[sample_mask], (n_queries, n_neighbors - 1)
)
return neigh_dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
"""Compute the (weighted) graph of k-Neighbors for points in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
For ``metric='precomputed'`` the shape should be
(n_queries, n_indexed). Otherwise the shape should be
(n_queries, n_features).
n_neighbors : int, default=None
Number of neighbors for each sample. The default is the value
passed to the constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X)
NearestNeighbors(n_neighbors=2)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
# check the input only in self.kneighbors
# construct CSR matrix representation of the k-NN graph
if mode == "connectivity":
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
n_queries = A_ind.shape[0]
A_data = np.ones(n_queries * n_neighbors)
elif mode == "distance":
A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
f'or "distance" but got "{mode}" instead'
)
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_nonzero = n_queries * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
kneighbors_graph = csr_matrix(
(A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)
)
return kneighbors_graph
| KNeighborsMixin |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/natural_language.py | {
"start": 7872,
"end": 10919
} | class ____(GoogleCloudBaseOperator):
"""
Analyzes the sentiment of the provided text.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeSentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START natural_language_analyze_sentiment_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_analyze_sentiment_template_fields]
def __init__(
self,
*,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start sentiment analyze")
response = hook.analyze_sentiment(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished sentiment analyze")
return MessageToDict(response._pb)
| CloudNaturalLanguageAnalyzeSentimentOperator |
python | ipython__ipython | IPython/terminal/embed.py | {
"start": 4167,
"end": 4323
} | class ____:
def __init__(self, repr):
assert isinstance(repr, str)
self.repr = repr
def __repr__(self):
return repr
| _Sentinel |
python | bokeh__bokeh | tests/unit/bokeh/core/test_has_props.py | {
"start": 12961,
"end": 13911
} | class ____(hp.HasProps):
foo = Int(default=lambda: 10)
def test_HasProps_apply_theme_func_default() -> None:
# check applying multiple themes
c = IntFuncDefault()
assert c.foo == 10
theme = dict(foo=20)
c.apply_theme(theme)
assert c.foo == 20
theme = dict(foo=30)
c.apply_theme(theme)
assert c.foo == 30
# check user set before theme
c = IntFuncDefault()
theme = dict(foo=30)
c.foo = 50
c.apply_theme(theme)
assert c.foo == 50
# check user set after theme
c = IntFuncDefault()
theme = dict(foo=30)
c.apply_theme(theme)
c.foo = 50
assert c.foo == 50
def test_has_props_dupe_prop() -> None:
try:
class DupeProps(hp.HasProps):
bar = AngleSpec()
bar_units = String()
except RuntimeError as e:
assert str(e) == "Two property generators both created DupeProps.bar_units"
else:
assert False
| IntFuncDefault |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common.py | {
"start": 16724,
"end": 27447
} | class ____:
"""Registry of command handlers for CLI.
Handler methods (callables) for user commands can be registered with this
class, which then is able to dispatch commands to the correct handlers and
retrieve the RichTextLines output.
For example, suppose you have the following handler defined:
def echo(argv, screen_info=None):
return RichTextLines(["arguments = %s" % " ".join(argv),
"screen_info = " + repr(screen_info)])
you can register the handler with the command prefix "echo" and alias "e":
registry = CommandHandlerRegistry()
registry.register_command_handler("echo", echo,
"Echo arguments, along with screen info", prefix_aliases=["e"])
then to invoke this command handler with some arguments and screen_info, do:
registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80})
or with the prefix alias:
registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80})
The call will return a RichTextLines object which can be rendered by a CLI.
"""
HELP_COMMAND = "help"
HELP_COMMAND_ALIASES = ["h"]
VERSION_COMMAND = "version"
VERSION_COMMAND_ALIASES = ["ver"]
def __init__(self):
# A dictionary from command prefix to handler.
self._handlers = {}
# A dictionary from prefix alias to prefix.
self._alias_to_prefix = {}
# A dictionary from prefix to aliases.
self._prefix_to_aliases = {}
# A dictionary from command prefix to help string.
self._prefix_to_help = {}
# Introductory text to help information.
self._help_intro = None
# Register a default handler for the command "help".
self.register_command_handler(
self.HELP_COMMAND,
self._help_handler,
"Print this help message.",
prefix_aliases=self.HELP_COMMAND_ALIASES)
# Register a default handler for the command "version".
self.register_command_handler(
self.VERSION_COMMAND,
self._version_handler,
"Print the versions of TensorFlow and its key dependencies.",
prefix_aliases=self.VERSION_COMMAND_ALIASES)
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""Register a callable as a command handler.
Args:
prefix: Command prefix, i.e., the first word in a command, e.g.,
"print" as in "print tensor_1".
handler: A callable of the following signature:
foo_handler(argv, screen_info=None),
where argv is the argument vector (excluding the command prefix) and
screen_info is a dictionary containing information about the screen,
such as number of columns, e.g., {"cols": 100}.
The callable should return:
1) a RichTextLines object representing the screen output.
The callable can also raise an exception of the type CommandLineExit,
which if caught by the command-line interface, will lead to its exit.
The exception can optionally carry an exit token of arbitrary type.
help_info: A help string.
prefix_aliases: Aliases for the command prefix, as a list of str. E.g.,
shorthands for the command prefix: ["p", "pr"]
Raises:
ValueError: If
1) the prefix is empty, or
2) handler is not callable, or
3) a handler is already registered for the prefix, or
4) elements in prefix_aliases clash with existing aliases.
5) help_info is not a str.
"""
if not prefix:
raise ValueError("Empty command prefix")
if prefix in self._handlers:
raise ValueError(
"A handler is already registered for command prefix \"%s\"" % prefix)
# Make sure handler is callable.
if not callable(handler):
raise ValueError("handler is not callable")
# Make sure that help info is a string.
if not isinstance(help_info, str):
raise ValueError("help_info is not a str")
# Process prefix aliases.
if prefix_aliases:
for alias in prefix_aliases:
if self._resolve_prefix(alias):
raise ValueError(
"The prefix alias \"%s\" clashes with existing prefixes or "
"aliases." % alias)
self._alias_to_prefix[alias] = prefix
self._prefix_to_aliases[prefix] = prefix_aliases
# Store handler.
self._handlers[prefix] = handler
# Store help info.
self._prefix_to_help[prefix] = help_info
def dispatch_command(self, prefix, argv, screen_info=None):
"""Handles a command by dispatching it to a registered command handler.
Args:
prefix: Command prefix, as a str, e.g., "print".
argv: Command argument vector, excluding the command prefix, represented
as a list of str, e.g.,
["tensor_1"]
screen_info: A dictionary containing screen info, e.g., {"cols": 100}.
Returns:
An instance of RichTextLines or None. If any exception is caught during
the invocation of the command handler, the RichTextLines will wrap the
error type and message.
Raises:
ValueError: If
1) prefix is empty, or
2) no command handler is registered for the command prefix, or
3) the handler is found for the prefix, but it fails to return a
RichTextLines or raise any exception.
CommandLineExit:
If the command handler raises this type of exception, this method will
simply pass it along.
"""
if not prefix:
raise ValueError("Prefix is empty")
resolved_prefix = self._resolve_prefix(prefix)
if not resolved_prefix:
raise ValueError("No handler is registered for command prefix \"%s\"" %
prefix)
handler = self._handlers[resolved_prefix]
try:
output = handler(argv, screen_info=screen_info)
except CommandLineExit as e:
raise e
except SystemExit as e:
# Special case for syntax errors caught by argparse.
lines = ["Syntax error for command: %s" % prefix,
"For help, do \"help %s\"" % prefix]
output = RichTextLines(lines)
except BaseException as e: # pylint: disable=broad-except
lines = ["Error occurred during handling of command: %s %s:" %
(resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))]
# Include traceback of the exception.
lines.append("")
lines.extend(traceback.format_exc().split("\n"))
output = RichTextLines(lines)
if not isinstance(output, RichTextLines) and output is not None:
raise ValueError(
"Return value from command handler %s is not None or a RichTextLines "
"instance" % str(handler))
return output
def is_registered(self, prefix):
"""Test if a command prefix or its alias is has a registered handler.
Args:
prefix: A prefix or its alias, as a str.
Returns:
True iff a handler is registered for prefix.
"""
return self._resolve_prefix(prefix) is not None
def get_help(self, cmd_prefix=None):
"""Compile help information into a RichTextLines object.
Args:
cmd_prefix: Optional command prefix. As the prefix itself or one of its
aliases.
Returns:
A RichTextLines object containing the help information. If cmd_prefix
is None, the return value will be the full command-line help. Otherwise,
it will be the help information for the specified command.
"""
if not cmd_prefix:
# Print full help information, in sorted order of the command prefixes.
help_info = RichTextLines([])
if self._help_intro:
# If help intro is available, show it at the beginning.
help_info.extend(self._help_intro)
sorted_prefixes = sorted(self._handlers)
for cmd_prefix in sorted_prefixes:
lines = self._get_help_for_command_prefix(cmd_prefix)
lines.append("")
lines.append("")
help_info.extend(RichTextLines(lines))
return help_info
else:
return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))
def set_help_intro(self, help_intro):
"""Set an introductory message to help output.
Args:
help_intro: (RichTextLines) Rich text lines appended to the
beginning of the output of the command "help", as introductory
information.
"""
self._help_intro = help_intro
def _help_handler(self, args, screen_info=None):
"""Command handler for "help".
"help" is a common command that merits built-in support from this class.
Args:
args: Command line arguments to "help" (not including "help" itself).
screen_info: (dict) Information regarding the screen, e.g., the screen
width in characters: {"cols": 80}
Returns:
(RichTextLines) Screen text output.
"""
_ = screen_info # Unused currently.
if not args:
return self.get_help()
elif len(args) == 1:
return self.get_help(args[0])
else:
return RichTextLines(["ERROR: help takes only 0 or 1 input argument."])
def _version_handler(self, args, screen_info=None):
del args # Unused currently.
del screen_info # Unused currently.
return get_tensorflow_version_lines(include_dependency_versions=True)
def _resolve_prefix(self, token):
"""Resolve command prefix from the prefix itself or its alias.
Args:
token: a str to be resolved.
Returns:
If resolvable, the resolved command prefix.
If not resolvable, None.
"""
if token in self._handlers:
return token
elif token in self._alias_to_prefix:
return self._alias_to_prefix[token]
else:
return None
def _get_help_for_command_prefix(self, cmd_prefix):
"""Compile the help information for a given command prefix.
Args:
cmd_prefix: Command prefix, as the prefix itself or one of its aliases.
Returns:
A list of str as the help information for cmd_prefix. If the cmd_prefix
does not exist, the returned list of str will indicate that.
"""
lines = []
resolved_prefix = self._resolve_prefix(cmd_prefix)
if not resolved_prefix:
lines.append("Invalid command prefix: \"%s\"" % cmd_prefix)
return lines
lines.append(resolved_prefix)
if resolved_prefix in self._prefix_to_aliases:
lines.append(HELP_INDENT + "Aliases: " + ", ".join(
self._prefix_to_aliases[resolved_prefix]))
lines.append("")
help_lines = self._prefix_to_help[resolved_prefix].split("\n")
for line in help_lines:
lines.append(HELP_INDENT + line)
return lines
| CommandHandlerRegistry |
python | getsentry__sentry | src/sentry/tasks/auth/auth.py | {
"start": 6769,
"end": 8047
} | class ____(OrganizationComplianceTask):
log_label = "2FA"
def is_compliant(self, user: RpcUser) -> bool:
if user:
return user.has_2fa()
return False
def call_to_action(self, org: Organization, user: RpcUser, member: OrganizationMember):
# send invite to setup 2fa
email_context = {"url": member.get_invite_link(), "organization": org}
subject = "{} {} Mandatory: Enable Two-Factor Authentication".format(
options.get("mail.subject-prefix"), org.name.capitalize()
)
message = MessageBuilder(
subject=subject,
template="sentry/emails/setup_2fa.txt",
html_template="sentry/emails/setup_2fa.html",
type="user.setup_2fa",
context=email_context,
)
message.send_async([member.get_email()])
@instrumented_task(
name="sentry.tasks.remove_2fa_non_compliant_members",
namespace=auth_tasks,
retry=Retry(
delay=60 * 5,
),
silo_mode=SiloMode.REGION,
)
@retry
def remove_2fa_non_compliant_members(org_id, actor_id=None, actor_key_id=None, ip_address=None):
TwoFactorComplianceTask().remove_non_compliant_members(
org_id, actor_id, actor_key_id, ip_address
)
| TwoFactorComplianceTask |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_coding_agents.py | {
"start": 13668,
"end": 22484
} | class ____(BaseOrganizationCodingAgentsTest):
"""Test class for POST endpoint parameter validation."""
def test_feature_flag_disabled(self):
"""Test POST endpoint when feature flag is disabled."""
data = {"integration_id": "123", "run_id": 123}
response = self.get_error_response(
self.organization.slug, method="post", status_code=403, **data
)
# POST returns plain string for disabled feature (403 PermissionDenied)
assert response.data["detail"] == "Feature not available"
def test_missing_integration_id(self):
"""Test POST endpoint with missing integration_id."""
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400
)
# Serializer returns field error mapping
assert "integration_id" in response.data
assert "run_id" in response.data
def test_invalid_integration_id(self):
"""Test POST endpoint with invalid integration_id."""
data = {"integration_id": "invalid_id"}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
assert "integration_id" in response.data
def test_non_coding_agent_integration(self):
"""Test POST endpoint with non-coding agent integration."""
# Create a non-coding agent integration (e.g., Slack)
slack_integration = self.create_integration(
organization=self.organization,
provider="slack",
name="Slack",
external_id="slack:123",
)
data = {"integration_id": str(slack_integration.id), "run_id": 123}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
# DRF ValidationError returns a list for non-field errors
assert response.data[0] == "Not a coding agent integration"
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
def test_integration_not_found(self, mock_get_org_integration, mock_get_providers):
"""Test POST endpoint with integration that doesn't exist."""
mock_get_providers.return_value = ["github"]
mock_get_org_integration.return_value = None
data = {"integration_id": "999", "run_id": 123}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=404, **data
)
assert response.data["detail"] == "Integration not found"
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
def test_inactive_integration(self, mock_get_org_integration, mock_get_providers):
"""Test POST endpoint with inactive integration."""
mock_get_providers.return_value = ["github"]
# Create inactive organization integration
inactive_org_integration = MagicMock()
inactive_org_integration.status = ObjectStatus.PENDING_DELETION
mock_get_org_integration.return_value = inactive_org_integration
data = {"integration_id": str(self.integration.id), "run_id": 123}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=404, **data
)
assert response.data["detail"] == "Integration not found"
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_empty_run_id(self, mock_get_integration, mock_get_org_integration, mock_get_providers):
"""Test POST endpoint with empty run_id."""
mock_get_providers.return_value = ["github"]
mock_get_org_integration.return_value = self.rpc_org_integration
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_integration.return_value = mock_rpc_integration
data = {"integration_id": str(self.integration.id), "run_id": ""}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
assert "run_id" in response.data
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_null_run_id(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_providers,
):
"""Test POST endpoint with null run_id."""
mock_get_providers.return_value = ["github"]
mock_get_org_integration.return_value = self.rpc_org_integration
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_integration.return_value = mock_rpc_integration
data = {"integration_id": str(self.integration.id), "run_id": None}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
assert "run_id" in response.data
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_string_run_id(
self, mock_get_integration, mock_get_org_integration, mock_get_providers
):
"""Test POST endpoint with non-numeric run_id."""
mock_get_providers.return_value = ["github"]
mock_get_org_integration.return_value = self.rpc_org_integration
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_integration.return_value = mock_rpc_integration
data = {"integration_id": str(self.integration.id), "run_id": "not_a_number"}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
assert "run_id" in response.data
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_invalid_coding_agent_installation(
self, mock_get_integration, mock_get_org_integration, mock_get_providers
):
"""Test POST endpoint when installation is not a CodingAgentIntegration."""
mock_get_providers.return_value = ["github"]
mock_get_org_integration.return_value = self.rpc_org_integration
# Mock integration that returns non-CodingAgentIntegration
mock_rpc_integration = MagicMock()
mock_rpc_integration.id = self.integration.id
mock_rpc_integration.provider = "github"
mock_rpc_integration.get_installation.return_value = (
MagicMock()
) # Not CodingAgentIntegration
mock_get_integration.return_value = mock_rpc_integration
data = {"integration_id": str(self.integration.id), "run_id": 123}
with self.feature({"organizations:seer-coding-agent-integrations": True}):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
# DRF ValidationError returns a list for non-field errors
assert response.data[0] == "Invalid coding agent integration"
| OrganizationCodingAgentsPostParameterValidationTest |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 25417,
"end": 25601
} | class ____(RuntimeError):
"""
Exception raised when clipboard functionality is unsupported.
Raised by ``to_clipboard()`` and ``read_clipboard()``.
"""
| PyperclipException |
python | numba__numba | numba/tests/test_jitclasses.py | {
"start": 871,
"end": 1164
} | class ____(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
def _get_meminfo(box):
ptr = _box.box_get_meminfoptr(box)
mi = MemInfo(ptr)
mi.acquire()
return mi
| TestClass2 |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/envs/unity_parallel_env.py | {
"start": 229,
"end": 1594
} | class ____(UnityPettingzooBaseEnv, ParallelEnv):
"""
Unity Parallel (PettingZoo) environment wrapper.
"""
def __init__(self, env: BaseEnv, seed: Optional[int] = None):
"""
Initializes a Unity Parallel environment wrapper.
:param env: The UnityEnvironment that is being wrapped.
:param seed: The seed for the action spaces of the agents.
"""
super().__init__(env, seed)
def reset(self) -> Dict[str, Any]:
"""
Resets the environment.
"""
super().reset()
return self._observations
def step(self, actions: Dict[str, Any]) -> Tuple:
self._assert_loaded()
if len(self._live_agents) <= 0 and actions:
raise error.Error(
"You must reset the environment before you can perform a step."
)
# Process actions
for current_agent, action in actions.items():
self._process_action(current_agent, action)
# Reset reward
for k in self._rewards.keys():
self._rewards[k] = 0
# Step environment
self._step()
# Agent cleanup and sorting
self._cleanup_agents()
self._live_agents.sort() # unnecessary, only for passing API test
return self._observations, self._rewards, self._dones, self._infos
| UnityParallelEnv |
python | tensorflow__tensorflow | tensorflow/lite/python/optimize/calibrator.py | {
"start": 1642,
"end": 9760
} | class ____:
"""Calibrates a floating point model and then quantizes it.
This is an internal class, not a public interface.
"""
def __init__(
self,
model_content,
custom_op_registerers_by_name=None,
custom_op_registerers_by_func=None,
):
"""Constructor.
Args:
model_content: Content of a TF-Lite Flatbuffer file.
custom_op_registerers_by_name: List of str (symbol names) that take a
pointer to a MutableOpResolver and register custom ops.
custom_op_registerers_by_func: List of functions that take a pointer to a
MutableOpResolver and register custom ops.
Raises:
ValueError: If the calibrator was unable to open the model.
"""
if not model_content:
raise ValueError("`model_content` must be specified.")
if custom_op_registerers_by_name is None:
custom_op_registerers_by_name = []
if custom_op_registerers_by_func is None:
custom_op_registerers_by_func = []
try:
self._calibrator = _calibration_wrapper.CalibrationWrapper(
model_content,
custom_op_registerers_by_name,
custom_op_registerers_by_func,
)
self._model_content = model_content
except Exception as e:
raise ValueError("Failed to parse the model: %s." % e)
if not self._calibrator:
raise ValueError("Failed to parse the model.")
self._interpreter = None
def _create_input_array_from_dict(self, signature_key, inputs):
input_array = []
signature_runner = self._interpreter.get_signature_runner(signature_key)
input_details = sorted(
signature_runner.get_input_details().items(),
key=lambda item: item[1]["index"],
)
for input_name, _ in input_details:
input_array.append(inputs[input_name])
return input_array
def _feed_tensors(self, dataset_gen, resize_input):
"""Feed tensors to the calibrator."""
initialized = {}
for sample in dataset_gen():
if isinstance(sample, tuple):
if not isinstance(sample[1], dict):
raise ValueError(
"You need to provide either a dictionary with input "
"names and values in the second argument in the "
"tuple"
)
# Convert signature based inputs to the tensor index based data.
if self._interpreter is None:
self._interpreter = Interpreter(model_content=self._model_content)
signature_key = sample[0]
input_array = self._create_input_array_from_dict(
signature_key, sample[1]
)
elif isinstance(sample, dict):
# Convert signature based inputs to the tensor index based data.
if self._interpreter is None:
self._interpreter = Interpreter(model_content=self._model_content)
signature_key = None
input_array = self._create_input_array_from_dict(None, sample)
elif isinstance(sample, list):
signature_key = None
input_array = sample
else:
raise ValueError(
"You need to provide either a dictionary with input "
"names and values, a tuple with signature key and a "
"dictionary with input names and values, or an array "
"with input values in the order of input tensors of "
"the graph in the representative_dataset function. "
"Unsupported value from dataset: {}.".format(sample)
)
if signature_key not in initialized:
initialized[signature_key] = True
if resize_input:
if signature_key is not None:
self._calibrator.Prepare(
[list(s.shape) for s in input_array], signature_key
)
else:
self._calibrator.Prepare([list(s.shape) for s in input_array])
else:
if signature_key is not None:
self._calibrator.Prepare(signature_key)
else:
self._calibrator.Prepare()
if signature_key is not None:
self._calibrator.FeedTensor(input_array, signature_key)
else:
self._calibrator.FeedTensor(input_array)
@convert_phase(
Component.OPTIMIZE_TFLITE_MODEL,
SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER,
)
def calibrate_and_quantize(
self,
dataset_gen,
input_type,
output_type,
allow_float,
activations_type=dtypes.int8,
bias_type=dtypes.int32,
resize_input=True,
disable_per_channel=False,
disable_per_channel_quantization_for_dense_layers=False,
):
"""Calibrates the model with specified generator and then quantizes it.
The input shapes of the calibrator are resized with the calibration data if
`resize_input` is set.
Returns:
A quantized model.
Args:
dataset_gen: A generator that generates calibration samples.
input_type: A tf.dtype representing the desired real-value input type.
output_type: A tf.dtype representing the desired real-value output type.
allow_float: A boolean. False if the resulting model cannot perform float
computation, useful when targeting an integer-only backend. If False, an
error will be thrown if an operation cannot be quantized, otherwise the
model will fallback to float ops.
activations_type: A tf.dtype representing the desired type for
activations.
bias_type: A tf.dtype representing the desired type for bias.
resize_input: A boolean. True if the shape of the sample data is different
from the input.
disable_per_channel: A boolean. True if disabling per-channel
quantization.
disable_per_channel_quantization_for_dense_layers: A boolean. True if
disabling per-channel quantization only in Dense layers.
"""
self._feed_tensors(dataset_gen, resize_input)
return self._calibrator.QuantizeModel(
np.dtype(input_type.as_numpy_dtype()).num,
np.dtype(output_type.as_numpy_dtype()).num,
allow_float,
np.dtype(activations_type.as_numpy_dtype()).num,
np.dtype(bias_type.as_numpy_dtype()).num,
disable_per_channel,
disable_per_channel_quantization_for_dense_layers,
)
@convert_phase(
Component.OPTIMIZE_TFLITE_MODEL,
SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER,
)
def calibrate_and_quantize_single(
self,
dataset_gen,
input_type,
output_type,
allow_float,
op_output_name,
resize_input=True,
):
"""Calibrates the model with specified generator and then quantizes it.
Only the single op with output op_output_name will be quantized.
The input shapes of the calibrator are resized with the calibration data.
Returns:
A quantized model.
Args:
dataset_gen: A generator that generates calibration samples.
input_type: A tf.dtype representing the desired real-value input type.
output_type: A tf.dtype representing the desired real-value output type.
allow_float: A boolean. False if the resulting model cannot perform float
computation, useful when targeting an integer-only backend. If False, an
error will be thrown if an operation cannot be quantized, otherwise the
model will fallback to float ops.
op_output_name: A string, only this op will be quantized.
resize_input: A boolean. True if the shape of the sample data is different
from the input.
"""
self._feed_tensors(dataset_gen, resize_input)
return self._calibrator.QuantizeModel(
np.dtype(input_type.as_numpy_dtype()).num,
np.dtype(output_type.as_numpy_dtype()).num,
allow_float,
op_output_name,
)
@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE)
def calibrate(self, dataset_gen):
"""Calibrates the model with specified generator.
Returns:
A model with min and max calibration stats.
Args:
dataset_gen: A generator that generates calibration samples.
"""
self._feed_tensors(dataset_gen, resize_input=True)
return self._calibrator.Calibrate()
| Calibrator |
python | doocs__leetcode | solution/3200-3299/3296.Minimum Number of Seconds to Make Mountain Height Zero/Solution.py | {
"start": 0,
"end": 348
} | class ____:
def minNumberOfSeconds(self, mountainHeight: int, workerTimes: List[int]) -> int:
def check(t: int) -> bool:
h = 0
for wt in workerTimes:
h += int(sqrt(2 * t / wt + 1 / 4) - 1 / 2)
return h >= mountainHeight
return bisect_left(range(10**16), True, key=check)
| Solution |
python | pdm-project__pdm | src/pdm/exceptions.py | {
"start": 196,
"end": 244
} | class ____(PdmException):
pass
| ResolutionError |
python | realpython__materials | inheritance-and-composition/choosing/productivity.py | {
"start": 0,
"end": 625
} | class ____:
def __init__(self):
self._roles = {
"manager": ManagerRole,
"secretary": SecretaryRole,
"sales": SalesRole,
"factory": FactoryRole,
}
def get_role(self, role_id):
role_type = self._roles.get(role_id)
if not role_type:
raise ValueError("role_id")
return role_type()
def track(self, employees, hours):
print("Tracking Employee Productivity")
print("==============================")
for employee in employees:
employee.work(hours)
print("")
| _ProductivitySystem |
python | pydata__xarray | asv_bench/benchmarks/dataset_io.py | {
"start": 20650,
"end": 25090
} | class ____:
def setup(self, *args, **kwargs):
"""
The custom backend does the bare minimum to be considered a lazy backend. But
the data in it is still in memory so slow file reading shouldn't affect the
results.
"""
requires_dask()
@dataclass
class PerformanceBackendArray(xr.backends.BackendArray):
filename_or_obj: str | os.PathLike | None
shape: tuple[int, ...]
dtype: np.dtype
lock: xr.backends.locks.SerializableLock
def __getitem__(self, key: tuple):
return xr.core.indexing.explicit_indexing_adapter(
key,
self.shape,
xr.core.indexing.IndexingSupport.BASIC,
self._raw_indexing_method,
)
def _raw_indexing_method(self, key: tuple):
raise NotImplementedError
@dataclass
class PerformanceStore(xr.backends.common.AbstractWritableDataStore):
manager: xr.backends.CachingFileManager
mode: str | None = None
lock: xr.backends.locks.SerializableLock | None = None
autoclose: bool = False
def __post_init__(self):
self.filename = self.manager._args[0]
@classmethod
def open(
cls,
filename: str | os.PathLike | None,
mode: str = "r",
lock: xr.backends.locks.SerializableLock | None = None,
autoclose: bool = False,
):
locker = lock or xr.backends.locks.SerializableLock()
manager = xr.backends.CachingFileManager(
xr.backends.DummyFileManager,
filename,
mode=mode,
)
return cls(manager, mode=mode, lock=locker, autoclose=autoclose)
def load(self) -> tuple:
"""
Load a bunch of test data quickly.
Normally this method would've opened a file and parsed it.
"""
n_variables = 2000
# Important to have a shape and dtype for lazy loading.
shape = (1000,)
dtype = np.dtype(int)
variables = {
f"long_variable_name_{v}": xr.Variable(
data=PerformanceBackendArray(
self.filename, shape, dtype, self.lock
),
dims=("time",),
fastpath=True,
)
for v in range(n_variables)
}
attributes = {}
return variables, attributes
class PerformanceBackend(xr.backends.BackendEntrypoint):
def open_dataset(
self,
filename_or_obj: str | os.PathLike | None,
drop_variables: tuple[str, ...] | None = None,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
use_cftime=None,
decode_timedelta=None,
lock=None,
**kwargs,
) -> xr.Dataset:
filename_or_obj = xr.backends.common._normalize_path(filename_or_obj)
store = PerformanceStore.open(filename_or_obj, lock=lock)
store_entrypoint = xr.backends.store.StoreBackendEntrypoint()
ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
self.engine = PerformanceBackend
@parameterized(["chunks"], ([None, {}, {"time": 10}]))
def time_open_dataset(self, chunks):
"""
Time how fast xr.open_dataset is without the slow data reading part.
Test with and without dask.
"""
xr.open_dataset(None, engine=self.engine, chunks=chunks)
| IOReadCustomEngine |
python | doocs__leetcode | solution/2400-2499/2451.Odd String Difference/Solution.py | {
"start": 0,
"end": 274
} | class ____:
def oddString(self, words: List[str]) -> str:
d = defaultdict(list)
for s in words:
t = tuple(ord(b) - ord(a) for a, b in pairwise(s))
d[t].append(s)
return next(ss[0] for ss in d.values() if len(ss) == 1)
| Solution |
python | huggingface__transformers | src/transformers/models/zamba2/modeling_zamba2.py | {
"start": 60031,
"end": 61249
} | class ____(PreTrainedModel):
config: Zamba2Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Zamba2AttentionDecoderLayer", "Zamba2MambaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_flex_attn = True
_supports_sdpa = True
# Note: only supports Zamba2HybridDynamicCache
_is_stateful = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Zamba2MambaMixer):
dt = torch.exp(
torch.rand(self.config.n_mamba_heads)
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+ math.log(self.config.time_step_min)
).clamp(min=self.config.time_step_floor)
# # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
inv_dt = dt + torch.log(-torch.expm1(-dt))
init.copy_(module.dt_bias, inv_dt)
A = torch.arange(1, module.num_heads + 1)
init.copy_(module.A_log, torch.log(A))
init.ones_(module.D)
@auto_docstring
| Zamba2PreTrainedModel |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/protocol.py | {
"start": 1174,
"end": 1378
} | class ____:
Inactive = "inactive"
Connecting = "connecting"
Connected = "connected"
Starting = "starting"
Active = "active"
Stopping = "stopping"
Error = "error"
| ConnectionStatus |
python | doocs__leetcode | solution/1000-1099/1099.Two Sum Less Than K/Solution.py | {
"start": 0,
"end": 293
} | class ____:
def twoSumLessThanK(self, nums: List[int], k: int) -> int:
nums.sort()
ans = -1
for i, x in enumerate(nums):
j = bisect_left(nums, k - x, lo=i + 1) - 1
if i < j:
ans = max(ans, x + nums[j])
return ans
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/tests/test_utilities.py | {
"start": 3665,
"end": 7019
} | class ____:
def test_succeeded_responds_true(self):
"""Desired behavior: `succeeded()` should return true if execution.status
contains a list element that is a dict with a key "type" and a value of
"Completed" and a key "status" and a value of "True"
"""
execution = Execution(
name="Test",
namespace="test-namespace",
metadata={},
spec={},
status={"conditions": [{"type": "Completed", "status": "True"}]},
log_uri="",
)
assert execution.succeeded()
@pytest.mark.parametrize(
"conditions",
[
[],
[{"type": "Dog", "status": "True"}],
[{"type": "Completed", "status": "False"}],
[{"type": "Completed", "status": "Dog"}],
],
)
def test_succeeded_responds_false(self, conditions):
"""Desired behavior: `succeeded()` should return False if execution.status
lacks a list element that is a dict with a key "type" and a value of
"Completed" and a key "status" and a value of "True".
This could be a situation where there is no element containing the key, or
the element with the key has a status that is not "True".
"""
execution = Execution(
name="Test",
namespace="test-namespace",
metadata={},
spec={},
status={"conditions": conditions},
log_uri="",
)
assert not execution.succeeded()
@pytest.mark.parametrize(
"status,expected_value", [({}, True), ({"completionTime": "xyz"}, False)]
)
def test_is_running(self, status, expected_value):
"""Desired behavior: `is_running()` should return True if there if
execution.status lack a key "completionTime", otherwise return False
"""
execution = Execution(
name="Test",
namespace="test-namespace",
metadata={},
spec={},
status=status,
log_uri="",
)
assert execution.is_running() == expected_value
@pytest.mark.parametrize(
"conditions, expected_value",
[
([], None),
([{"type": "Dog", "status": "True"}], None),
(
[{"type": "Completed", "status": "False"}],
{"type": "Completed", "status": "False"},
),
(
[
{"type": "Dog", "status": "True"},
{"type": "Completed", "status": "False"},
],
{"type": "Completed", "status": "False"},
),
],
)
def test_condition_after_completion_returns_correct_condition(
self, conditions, expected_value
):
"""Desired behavior: `condition_after_completion()`
should return the list element from execution.status that contains a dict
with a key "type" and a value of "Completed" if it exists, else None
"""
execution = Execution(
name="Test",
namespace="test-namespace",
metadata={},
spec={},
status={"conditions": conditions},
log_uri="",
)
assert execution.condition_after_completion() == expected_value
| TestExecution |
python | Textualize__textual | src/textual/validation.py | {
"start": 17642,
"end": 18715
} | class ____(Validator):
"""Validator that checks if a URL is valid (ensuring a scheme is present)."""
class InvalidURL(Failure):
"""Indicates that the URL is not valid."""
def validate(self, value: str) -> ValidationResult:
"""Validates that `value` is a valid URL (contains a scheme).
Args:
value: The value to validate.
Returns:
The result of the validation.
"""
invalid_url = ValidationResult.failure([URL.InvalidURL(self, value)])
try:
parsed_url = urlparse(value)
if not all([parsed_url.scheme, parsed_url.netloc]):
return invalid_url
except ValueError:
return invalid_url
return self.success()
def describe_failure(self, failure: Failure) -> str | None:
"""Describes why the validator failed.
Args:
failure: Information about why the validation failed.
Returns:
A string description of the failure.
"""
return "Must be a valid URL."
| URL |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 12295,
"end": 12666
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
text_document: TextDocumentItem
@staticmethod
def from_json_rpc_parameters(
parameters: json_rpc.Parameters,
) -> "DidOpenTextDocumentParameters":
return _parse_parameters(parameters, target=DidOpenTextDocumentParameters)
@dataclasses.dataclass(frozen=True)
| DidOpenTextDocumentParameters |
python | pytorch__pytorch | test/mobile/lightweight_dispatch/tests_setup.py | {
"start": 1960,
"end": 2278
} | class ____(torch.nn.Upsample):
def __init__(self) -> None:
super().__init__(
scale_factor=(2.0,),
mode="linear",
align_corners=False,
recompute_scale_factor=True,
)
# index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
@save_model
| ModelWithFloatList |
python | google__jax | jax/experimental/jax2tf/tests/model_harness.py | {
"start": 1568,
"end": 13870
} | class ____:
name: str
apply: Callable[..., Any]
variables: dict[str, Any]
inputs: Sequence[np.ndarray]
rtol: float = 1e-4
polymorphic_shapes: Sequence[str | None] | None = None
tensor_spec: Sequence[tf.TensorSpec] | None = None
def __post_init__(self):
# When providing polymorphic shapes, tensor_spec should be provided as well.
assert bool(self.polymorphic_shapes) == bool(self.tensor_spec)
@property
def tf_input_signature(self):
def _to_tensorspec(x):
return tf.TensorSpec(x.shape, tf.dtypes.as_dtype(x.dtype))
if self.tensor_spec:
return self.tensor_spec
else:
return jax.tree_util.tree_map(_to_tensorspec, self.inputs)
def apply_with_vars(self, *args, **kwargs):
return self.apply(self.variables, *args, **kwargs)
##### All harnesses in this file.
ALL_HARNESSES: dict[str, Callable[[str], ModelHarness]] = {}
def _make_harness(harness_fn, name, poly_shapes=None, tensor_specs=None):
"""Partially apply harness in order to create variables lazily.
Note: quotes and commas are stripped from `name` to ensure they can be passed
through the command-line.
"""
if poly_shapes:
name += "_" + re.sub(r"(?:'|\"|,)", "", str(poly_shapes))
if tensor_specs:
tensor_specs = [tf.TensorSpec(spec, dtype) for spec, dtype in tensor_specs]
partial_fn = functools.partial(
harness_fn,
name=name,
polymorphic_shapes=poly_shapes,
tensor_spec=tensor_specs)
if name in ALL_HARNESSES:
raise ValueError(f"Harness {name} exists already")
ALL_HARNESSES[name] = partial_fn
######################## Model Harness Definitions #############################
def _actor_critic_harness(name, **kwargs):
model = actor_critic.ActorCritic(num_outputs=8)
x = np.zeros((1, 84, 84, 4), np.float32)
variables = model.init(random.PRNGKey(0), x)
return ModelHarness(name, model.apply, variables, [x], **kwargs)
def _bilstm_harness(name, **kwargs):
model = bilstm_classifier.TextClassifier(
# TODO(marcvanzee): This fails when
# `embedding_size != hidden_size`. I suppose some arrays are
# concatenated with incompatible shapes, which could mean
# something is going wrong in the translation.
embedding_size=3,
hidden_size=1,
vocab_size=13,
output_size=1,
dropout_rate=0.,
word_dropout_rate=0.)
x = np.array([[2, 4, 3], [2, 6, 3]], np.int32)
lengths = np.array([2, 3], np.int32)
variables = model.init(random.PRNGKey(0), x, lengths, deterministic=True)
apply = functools.partial(model.apply, deterministic=True)
return ModelHarness(name, apply, variables, [x, lengths], **kwargs)
def _cnn_harness(name, **kwargs):
model = cnn.CNN()
x = np.zeros((1, 28, 28, 1), np.float32)
variables = model.init(random.PRNGKey(0), x)
return ModelHarness(name, model.apply, variables, [x], **kwargs)
def _get_gnn_graphs():
n_node = np.arange(3, 11)
n_edge = np.arange(4, 12)
total_n_node = np.sum(n_node)
total_n_edge = np.sum(n_edge)
n_graph = n_node.shape[0]
feature_dim = 10
graphs = jraph.GraphsTuple(
n_node=n_node,
n_edge=n_edge,
senders=np.zeros(total_n_edge, dtype=np.int32),
receivers=np.ones(total_n_edge, dtype=np.int32),
nodes=np.ones((total_n_node, feature_dim)),
edges=np.zeros((total_n_edge, feature_dim)),
globals=np.zeros((n_graph, feature_dim)),
)
return graphs
def _gnn_harness(name, **kwargs):
# Setting taken from flax/examples/ogbg_molpcba/models_test.py.
rngs = {
'params': random.PRNGKey(0),
'dropout': random.PRNGKey(1),
}
graphs = _get_gnn_graphs()
model = gnn.GraphNet(
latent_size=5,
num_mlp_layers=2,
message_passing_steps=2,
output_globals_size=15,
use_edge_model=True)
variables = model.init(rngs, graphs)
return ModelHarness(name, model.apply, variables, [graphs], rtol=2e-4,
**kwargs)
def _gnn_conv_harness(name, **kwargs):
# Setting taken from flax/examples/ogbg_molpcba/models_test.py.
rngs = {
'params': random.PRNGKey(0),
'dropout': random.PRNGKey(1),
}
graphs = _get_gnn_graphs()
model = gnn.GraphConvNet(
latent_size=5,
num_mlp_layers=2,
message_passing_steps=2,
output_globals_size=5)
variables = model.init(rngs, graphs)
return ModelHarness(name, model.apply, variables, [graphs], **kwargs)
def _resnet50_harness(name, **kwargs):
model = resnet.ResNet50(num_classes=2, dtype=np.float32)
x = np.zeros((8, 16, 16, 3), np.float32)
variables = model.init(random.PRNGKey(0), x)
apply = functools.partial(model.apply, train=False, mutable=False)
return ModelHarness(name, apply, variables, [x], **kwargs)
def _seq2seq_lstm_harness(name, **kwargs):
model = seq2seq_lstm.Seq2seq(teacher_force=True, hidden_size=2, vocab_size=4)
encoder_inputs = np.zeros((1, 2, 4), np.float32) # [batch, inp_len, vocab]
decoder_inputs = np.zeros((1, 3, 4), np.float32) # [batch, outp_len, vocab]
rngs = {
'params': random.PRNGKey(0),
'lstm': random.PRNGKey(1),
}
xs = [encoder_inputs, decoder_inputs]
variables = model.init(rngs, *xs)
apply = functools.partial(model.apply, rngs={'lstm': random.PRNGKey(2)})
return ModelHarness(name, apply, variables, xs, **kwargs)
def _min_transformer_kwargs():
return dict(
vocab_size=8,
output_vocab_size=8,
emb_dim = 4,
num_heads= 1,
num_layers = 1,
qkv_dim= 2,
mlp_dim = 2,
max_len = 2,
dropout_rate = 0.,
attention_dropout_rate = 0.)
def _full_transformer_kwargs():
kwargs = dict(
decode = True,
deterministic = True,
logits_via_embedding=False,
share_embeddings=False)
return {**kwargs, **_min_transformer_kwargs()}
def _transformer_lm1b_harness(name, **kwargs):
config = lm1b.TransformerConfig(**_full_transformer_kwargs())
model = lm1b.TransformerLM(config=config)
x = np.zeros((2, 1), np.float32)
rng1, rng2 = random.split(random.PRNGKey(0))
variables = model.init(rng1, x)
def apply(*args):
# Don't return the new state (containing the cache).
output, _ = model.apply(*args, rngs={'cache': rng2}, mutable=['cache'])
return output
return ModelHarness(name, apply, variables, [x], **kwargs)
def _transformer_nlp_seq_harness(name, **kwargs):
config = nlp_seq.TransformerConfig(**_min_transformer_kwargs())
model = nlp_seq.Transformer(config=config)
x = np.zeros((2, 1), np.float32)
variables = model.init(random.PRNGKey(0), x, train=False)
apply = functools.partial(model.apply, train=False)
return ModelHarness(name, apply, variables, [x], **kwargs)
def _transformer_wmt_harness(name, **kwargs):
config = wmt.TransformerConfig(**_full_transformer_kwargs())
model = wmt.Transformer(config=config)
x = np.zeros((2, 1), np.float32)
variables = model.init(random.PRNGKey(0), x, x)
def apply(*args):
# Don't return the new state (containing the cache).
output, _ = model.apply(*args, mutable=['cache'])
return output
return ModelHarness(name, apply, variables, [x, x], **kwargs)
def _vae_harness(name, **kwargs):
  """Builds a ModelHarness exercising the VAE example's `generate` method."""
  model = vae.VAE(latents=3)
  sample = np.zeros((1, 8, 8, 3), np.float32)
  init_rng, z_rng = random.split(random.PRNGKey(0))
  variables = model.init(init_rng, sample, z_rng)

  def generate(v, x):
    return model.apply(v, x, method=model.generate)

  return ModelHarness(name, generate, variables, [sample], **kwargs)
####################### Model Harness Construction #############################

# Each section below registers one Flax example model via `_make_harness`
# under several shape-polymorphism configurations: `poly_shapes` is the
# jax2tf polymorphic-shapes specification and `tensor_specs` gives the
# matching TensorSpec shapes, with None marking the dynamic dimension.
# A (None, None) pair registers the fully static (non-polymorphic) baseline.

# actor_critic input spec: [((1, 84, 84, 4), np.float32)].
for poly_shapes, tensor_specs in [
    (None, None),  # No polymorphism.
    # batch polymorphism.
    (["(b, ...)"], [((None, 84, 84, 4), tf.float32)]),
    # Dependent shapes for spatial dims.
    # TODO(marcvanzee): Figure out the right multiple for these dimensions.
    (["(_, 4*b, 4*b, _)"], [((1, None, None, 4), tf.float32)]),
]:
  _make_harness(
      harness_fn=_actor_critic_harness,
      name="flax/actor_critic",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)

# bilstm input specs: [((2, 3), np.int32), ((2,), np.int32)] = [inputs, lengths]
for poly_shapes, tensor_specs in [
    (None, None),
    # batch polymorphism
    (["(b, _)", "(_,)"], [((None, 3), tf.int32), ((2,), tf.int32)]),
    # dynamic input lengths
    (["(_, _)", "(b,)"], [((2, 3), tf.int32), ((None,), tf.int32)]),
]:
  _make_harness(
      harness_fn=_bilstm_harness,
      name="flax/bilstm",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)

# cnn input spec: [((1, 28, 28, 1), np.float32)].
for poly_shapes, tensor_specs in [
    (None, None),  # No polymorphism.
    # batch polymorphism.
    (["(b, ...)"], [((None, 28, 28, 1), tf.float32)]),
    # Dependent shapes for spatial dims.
    # TODO(marcvanzee): Figure out the right multiple for these dimensions.
    (["(_, b, b, _)"], [((1, None, None, 1), tf.float32)]),
]:
  _make_harness(
      harness_fn=_cnn_harness,
      name="flax/cnn",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)

# We do not support polymorphism for the GNN examples since they use GraphTuples
# as input rather than regular arrays.
_make_harness(harness_fn=_gnn_harness, name="flax/gnn")
_make_harness(harness_fn=_gnn_conv_harness, name="flax/gnn_conv")

# resnet50 input spec: [((8, 16, 16, 3), np.float32)]
for poly_shapes, tensor_specs in [
    (None, None),  # No polymorphism.
    # batch polymorphism.
    (["(b, ...)"], [((None, 16, 16, 3), tf.float32)]),
    # Dependent shapes for spatial dims.
    # TODO(marcvanzee): Figure out the right multiple for these dimensions.
    (["(_, 4*b, 4*b, _)"], [((8, None, None, 3), tf.float32)]),
]:
  _make_harness(
      harness_fn=_resnet50_harness,
      name="flax/resnet50",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)

# seq2seq input specs (we use the same input and output lengths for now):
# [
#   ((1, 2, 4), np.float32),  # encoder inp: [batch, max_input_len, vocab_size]
#   ((1, 3, 4), np.float32),  # decoder_inp: [batch, max_output_len, vocab_size]
# ]
for poly_shapes, tensor_specs in [
    (None, None),
    # batch polymorphism
    (
        ["(b, _, _)", "(b, _, _)"],
        [((None, 2, 4), tf.float32), ((None, 3, 4), tf.float32)],
    ),
    # dynamic input lengths
    (
        ["(_, b, _)", "(_, _, _)"],
        [((1, None, 4), tf.float32), ((1, 3, 4), tf.float32)],
    ),
    # dynamic output lengths
    (
        ["(_, _, _)", "(_, b, _)"],
        [((1, 2, 4), tf.float32), ((1, None, 4), tf.float32)],
    ),
]:
  _make_harness(
      harness_fn=_seq2seq_lstm_harness,
      name="flax/seq2seq_lstm",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)

# lm1b/nlp_seq input spec: [((2, 1), np.float32)] [batch, seq_len]
for poly_shapes, tensor_specs in [
    (None, None),
    # batch polymorphism.
    (["(b, _)"], [((None, 1), tf.float32)]),
]:
  # Both transformer language models share the same input shape, so they are
  # registered together under each polymorphism configuration.
  for name, harness_fn in [
      ("flax/lm1b", _transformer_lm1b_harness),
      ("flax/nlp_seq", _transformer_nlp_seq_harness)
  ]:
    _make_harness(
        harness_fn=harness_fn,
        name=name,
        poly_shapes=poly_shapes,
        tensor_specs=tensor_specs)

# wmt input spec (both inputs have the same shape):
# [
#   ((1, 2), np.float32),  # inputs: [batch, max_target_len]
#   ((1, 2), np.float32),  # targets: [batch, max_target_len]
# ]
# NOTE(review): _transformer_wmt_harness builds inputs of shape (2, 1), which
# does not match the (1, 2) in the comment above — confirm which is intended.
for poly_shapes, tensor_specs in [
    (None, None),
    # batch polymorphism.
    (["(b, _)"] * 2, [((None, 1), tf.float32)] * 2),
    # dynamic lengths.
    (["(_, b)"] * 2, [((1, None), tf.float32)] * 2),
]:
  _make_harness(
      harness_fn=_transformer_wmt_harness,
      name="flax/wmt",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)

# vae input spec: [((1, 8, 8, 3), np.float32)].
for poly_shapes, tensor_specs in [
    (None, None),  # No polymorphism.
    # batch polymorphism.
    (["(b, ...)"], [((None, 8, 8, 3), tf.float32)]),
    # Dependent shapes for spatial dims.
    # TODO(marcvanzee): Figure out the right multiple for these dimensions.
    (["(_, b, b, _)"], [((1, None, None, 3), tf.float32)]),
]:
  _make_harness(
      harness_fn=_vae_harness,
      name="flax/vae",
      poly_shapes=poly_shapes,
      tensor_specs=tensor_specs)
| ModelHarness |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_comm_hooks.py | {
"start": 2506,
"end": 3901
} | class ____:
def dummy_hook_for_no_shard_fsdp(self, state: DummyState, grad: torch.Tensor):
"""
This communication hook is for illustration and testing purpose only.
This communication hook is used during FSDP ``NO_SHARD`` training. It adds some noise to
the provided ``grad`` parameter and uses ``all_reduce`` to communicate full, flattened,
unsharded gradient.
"""
grad.add_(state.noise)
dist.all_reduce(grad, group=state.process_group)
def custom_reduce_scatter(self, output, input, group=None):
"""
This function is for illustrative purpose only.
It is meant to implement a custom reduce-scatter
of a flattened tensor to all processes in a group.
Currently a no-op.
"""
def dummy_hook_for_sharded_fsdp(
self, state: DummyState, grad: torch.Tensor, output: torch.Tensor
):
"""
This communication hook is for illustration and testing purposes only.
This communication hook is used during FSDP ``FULL_SHARD`` or ``SHARD_GRAD_OP`` training.
It adds some noise to the provided ``grad`` parameter, uses
``reduce_scatter`` for gradient communication and stores a sharded gradient in ``output``.
"""
grad.add_(state.noise)
self.custom_reduce_scatter(output, grad, group=state.process_group)
| DummyHook |
python | getsentry__sentry | tests/sentry/db/models/fields/test_slug.py | {
"start": 1698,
"end": 3859
} | class ____(TestCase):
def setUp(self) -> None:
self.compiler = Mock()
# Simulate the quoting behavior for simplicity in tests
self.compiler.quote_name_unless_alias = lambda name: (
f"{name}" if '"' in name else f'"{name}"'
)
self.connection = Mock()
@patch("sentry.db.models.fields.slug.IdOrSlugLookup.process_rhs")
@patch("sentry.db.models.fields.slug.IdOrSlugLookup.process_lhs")
def test_as_sql_with_numeric_rhs(
self, mock_process_lhs: MagicMock, mock_process_rhs: MagicMock
) -> None:
mock_process_lhs.return_value = ('"table"."id"', [])
mock_process_rhs.return_value = ("%s", ["123"])
lookup = IdOrSlugLookup("id__id_or_slug", "123")
sql, params = lookup.as_sql(self.compiler, self.connection)
self.assertEqual(sql, '"table"."id" = %s')
self.assertEqual(params, ["123"])
@patch("sentry.db.models.fields.slug.IdOrSlugLookup.process_rhs")
@patch("sentry.db.models.fields.slug.IdOrSlugLookup.process_lhs")
def test_as_sql_with_non_numeric_rhs(
self, mock_process_lhs: MagicMock, mock_process_rhs: MagicMock
) -> None:
mock_process_lhs.return_value = ('"table"."slug"', [])
mock_process_rhs.return_value = ("%s", ["123slug"])
lookup = IdOrSlugLookup("slug__id_or_slug", "123slug")
sql, params = lookup.as_sql(self.compiler, self.connection)
self.assertEqual(sql, '"table"."slug" = %s')
self.assertEqual(params, ["123slug"])
@patch("sentry.db.models.fields.slug.IdOrSlugLookup.process_rhs")
@patch("sentry.db.models.fields.slug.IdOrSlugLookup.process_lhs")
def test_as_sql_with_alphabetic_rhs(
self, mock_process_lhs: MagicMock, mock_process_rhs: MagicMock
) -> None:
mock_process_lhs.return_value = ('"table"."slug"', [])
mock_process_rhs.return_value = ("%s", ["slug"])
lookup = IdOrSlugLookup("slug__id_or_slug", "slug")
sql, params = lookup.as_sql(self.compiler, self.connection)
self.assertEqual(sql, '"table"."slug" = %s')
self.assertEqual(params, ["slug"])
| IdOrSlugLookupTests |
python | django__django | tests/db_functions/math/test_ln.py | {
"start": 267,
"end": 2227
} | class ____(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_ln=Ln("normal")).first()
self.assertIsNone(obj.null_ln)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("12.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(n1_ln=Ln("n1"), n2_ln=Ln("n2")).first()
self.assertIsInstance(obj.n1_ln, Decimal)
self.assertIsInstance(obj.n2_ln, Decimal)
self.assertAlmostEqual(obj.n1_ln, Decimal(math.log(obj.n1)))
self.assertAlmostEqual(obj.n2_ln, Decimal(math.log(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_ln=Ln("f1"), f2_ln=Ln("f2")).first()
self.assertIsInstance(obj.f1_ln, float)
self.assertIsInstance(obj.f2_ln, float)
self.assertAlmostEqual(obj.f1_ln, math.log(obj.f1))
self.assertAlmostEqual(obj.f2_ln, math.log(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=20, normal=15, big=1)
obj = IntegerModel.objects.annotate(
small_ln=Ln("small"),
normal_ln=Ln("normal"),
big_ln=Ln("big"),
).first()
self.assertIsInstance(obj.small_ln, float)
self.assertIsInstance(obj.normal_ln, float)
self.assertIsInstance(obj.big_ln, float)
self.assertAlmostEqual(obj.small_ln, math.log(obj.small))
self.assertAlmostEqual(obj.normal_ln, math.log(obj.normal))
self.assertAlmostEqual(obj.big_ln, math.log(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Ln):
DecimalModel.objects.create(n1=Decimal("12.0"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("1.0"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__ln__gt=0).get()
self.assertEqual(obj.n1, Decimal("12.0"))
| LnTests |
python | getsentry__sentry | src/sentry/middleware/integrations/integration_control.py | {
"start": 461,
"end": 1958
} | class ____:
classifications: list[type[BaseClassification]] = [
IntegrationClassification,
PluginClassification,
]
"""
Classifications to determine whether request must be parsed, sorted in priority order.
getsentry expands this list on django initialization.
"""
def __init__(self, get_response: ResponseHandler):
self.get_response = get_response
def _should_operate(self, request: HttpRequest) -> bool:
"""
Determines whether this middleware will operate or just pass the request along.
"""
return SiloMode.get_current_mode() == SiloMode.CONTROL
@classmethod
def register_classifications(cls, classifications: list[type[BaseClassification]]) -> None:
"""
Add new classifications for middleware to determine request parsing dynamically.
Used in getsentry to expand scope of parsing.
"""
cls.classifications += classifications
def __call__(self, request: HttpRequest) -> HttpResponseBase:
if not self._should_operate(request):
return self.get_response(request)
# Check request against each classification, if a match is found, return early
for classification in self.classifications:
_cls = classification(response_handler=self.get_response)
if _cls.should_operate(request):
return _cls.get_response(request)
return self.get_response(request)
| IntegrationControlMiddleware |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/await1.py | {
"start": 325,
"end": 925
} | class ____:
id: int
async def func1(check: "Callable[[AnyMsg], bool]") -> AnyMsg: ...
async def func2():
_: Msg[Request] = await func1(check=lambda msg: (msg.body.id == 12345))
async def func3() -> AsyncIterator[int]:
yield 1
async def func4() -> int:
return await anext(func3())
async def func5(__fn: Callable[..., T]) -> T: ...
@overload
def sum(__iterable: Iterable[Literal[0]]) -> int: ...
@overload
def sum(__iterable: Iterable[T]) -> T: ...
def sum(__iterable: Iterable[Any]) -> Any: ...
async def func6(f: Callable[[], list[int]]):
sum(await func5(f))
| Request |
python | django__django | tests/staticfiles_tests/test_views.py | {
"start": 954,
"end": 1186
} | class ____(TestDefaults, TestServeStatic):
"""
Test static asset serving view with manually configured URLconf.
"""
@override_settings(DEBUG=True, ROOT_URLCONF="staticfiles_tests.urls.helper")
| TestServeStaticWithDefaultURL |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_templatetags.py | {
"start": 2509,
"end": 9568
} | class ____(Base):
restore_settings = ['THUMBNAIL_DEBUG', 'TEMPLATE_DEBUG']
def testTagInvalid(self):
# No args, or wrong number of args
src = '{% thumbnail %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
src = '{% thumbnail source %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
src = '{% thumbnail source 80x80 as variable crop %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid option
src = '{% thumbnail source 240x200 invalid %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Old comma separated options format can only have an = for quality
src = '{% thumbnail source 80x80 crop=1,quality=1 %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid quality
src_invalid = '{% thumbnail source 240x200 quality=invalid_q %}'
src_missing = '{% thumbnail source 240x200 quality=missing_q %}'
# ...with THUMBNAIL_DEBUG = False
settings.THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src_invalid), '')
self.assertEqual(self.render_template(src_missing), '')
# ...and with THUMBNAIL_DEBUG = True
settings.THUMBNAIL_DEBUG = True
self.assertRaises(TemplateSyntaxError, self.render_template,
src_invalid)
self.assertRaises(TemplateSyntaxError, self.render_template,
src_missing)
# Invalid source
src = '{% thumbnail invalid_source 80x80 %}'
src_on_context = '{% thumbnail invalid_source 80x80 as thumb %}'
# ...with THUMBNAIL_DEBUG = False
settings.THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src), '')
# ...and with THUMBNAIL_DEBUG = True
settings.THUMBNAIL_DEBUG = True
self.assertRaises(TemplateSyntaxError, self.render_template, src)
self.assertRaises(TemplateSyntaxError, self.render_template,
src_on_context)
# Non-existent source
src = '{% thumbnail non_existant_source 80x80 %}'
src_on_context = '{% thumbnail non_existant_source 80x80 as thumb %}'
# ...with THUMBNAIL_DEBUG = False
settings.THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src), '')
# ...and with THUMBNAIL_DEBUG = True
settings.THUMBNAIL_DEBUG = True
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid size as a tuple:
src = '{% thumbnail source invalid_size %}'
# ...with THUMBNAIL_DEBUG = False
settings.THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src), '')
# ...and THUMBNAIL_DEBUG = True
settings.THUMBNAIL_DEBUG = True
self.assertRaises(ValueError, self.render_template, src)
# Invalid size as a string:
src = '{% thumbnail source invalid_strsize %}'
# ...with THUMBNAIL_DEBUG = False
settings.THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src), '')
# ...and THUMBNAIL_DEBUG = True
settings.THUMBNAIL_DEBUG = True
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Non-existent size
src = '{% thumbnail source non_existant_size %}'
# ...with THUMBNAIL_DEBUG = False
settings.THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src), '')
# ...and THUMBNAIL_DEBUG = True
settings.THUMBNAIL_DEBUG = True
self.assertRaises(TemplateSyntaxError, self.render_template, src)
src = '{% thumbnail source 240x240 HIGH_RESOLUTION %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
def testTag(self):
# Set THUMBNAIL_DEBUG = True to make it easier to trace any failures
settings.THUMBNAIL_DEBUG = True
# Basic
output = self.render_template(
'src="{% thumbnail source 240x240 %}"')
expected = self.verify_thumbnail((240, 180), {'size': (240, 240)})
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# Size from context variable
# as a tuple:
output = self.render_template(
'src="{% thumbnail source size %}"')
expected = self.verify_thumbnail((90, 68), {'size': (90, 100)})
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# as a string:
output = self.render_template(
'src="{% thumbnail source strsize %}"')
expected = self.verify_thumbnail((80, 60), {'size': (80, 90)})
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# On context
output = self.render_template(
'height:{% thumbnail source 240x240 as thumb %}{{ thumb.height }}')
self.assertEqual(output, 'height:180')
# With options and quality
output = self.render_template(
'src="{% thumbnail source 240x240 sharpen crop quality=95 %}"')
# Note that the opts are sorted to ensure a consistent filename.
expected = self.verify_thumbnail(
(240, 240),
{'size': (240, 240), 'crop': True, 'sharpen': True, 'quality': 95})
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# With option and quality on context (also using its unicode method to
# display the url)
output = self.render_template(
'{% thumbnail source 240x240 sharpen crop quality=95 as thumb %}'
'width:{{ thumb.width }}, url:{{ thumb.url }}')
self.assertEqual(output, 'width:240, url:%s' % expected_url)
# One dimensional resize
output = self.render_template('src="{% thumbnail source 100x0 %}"')
expected = self.verify_thumbnail((100, 75), {'size': (100, 0)})
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
def test_mirror_templatetag_library(self):
"""Testing the mirror `easy_thumbnails_tags` templatetag library.
Testing the loading {% load easy_thumbnails_tags %} instead of
traditional {% load thumbnail %}.
"""
# Set THUMBNAIL_DEBUG = True to make it easier to trace any failures
settings.THUMBNAIL_DEBUG = True
# Basic (just one basic test is enough)
output = self.render_template(
'src="{% thumbnail source 240x240 %}"',
'easy_thumbnails_tags'
)
expected = self.verify_thumbnail((240, 180), {'size': (240, 240)})
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
| ThumbnailTagTest |
python | apache__airflow | providers/docker/src/airflow/providers/docker/exceptions.py | {
"start": 1335,
"end": 1718
} | class ____(AirflowSkipException):
"""
Raised when a Docker container returns an error and task should be skipped.
:param logs: The log output of the failed Docker container
"""
def __init__(self, message: str | None = None, logs: list[str | bytes] | None = None) -> None:
super().__init__(message)
self.logs = logs
| DockerContainerFailedSkipException |
python | mlflow__mlflow | mlflow/tensorflow/__init__.py | {
"start": 37846,
"end": 62734
} | class ____(NamedTuple):
location: str
is_temp: bool
def _setup_callbacks(callbacks, log_every_epoch, log_every_n_steps):
"""
Adds TensorBoard and MlfLowTfKeras callbacks to the
input list, and returns the new list and appropriate log directory.
"""
from mlflow.tensorflow.autologging import _TensorBoard
from mlflow.tensorflow.callback import MlflowCallback, MlflowModelCheckpointCallback
tb = _get_tensorboard_callback(callbacks)
for callback in callbacks:
if isinstance(callback, MlflowCallback):
raise MlflowException(
"MLflow autologging must be turned off if an `MlflowCallback` is explicitly added "
"to the callback list. You are creating an `MlflowCallback` while having "
"autologging enabled. Please either call `mlflow.tensorflow.autolog(disable=True)` "
"to disable autologging or remove `MlflowCallback` from the callback list. "
)
if tb is None:
log_dir = _TensorBoardLogDir(location=tempfile.mkdtemp(), is_temp=True)
callbacks.append(_TensorBoard(log_dir.location))
else:
log_dir = _TensorBoardLogDir(location=tb.log_dir, is_temp=False)
callbacks.append(
MlflowCallback(
log_every_epoch=log_every_epoch,
log_every_n_steps=log_every_n_steps,
)
)
model_checkpoint = get_autologging_config(mlflow.tensorflow.FLAVOR_NAME, "checkpoint", True)
if model_checkpoint:
checkpoint_monitor = get_autologging_config(
mlflow.tensorflow.FLAVOR_NAME, "checkpoint_monitor", "val_loss"
)
checkpoint_mode = get_autologging_config(
mlflow.tensorflow.FLAVOR_NAME, "checkpoint_mode", "min"
)
checkpoint_save_best_only = get_autologging_config(
mlflow.tensorflow.FLAVOR_NAME, "checkpoint_save_best_only", True
)
checkpoint_save_weights_only = get_autologging_config(
mlflow.tensorflow.FLAVOR_NAME, "checkpoint_save_weights_only", False
)
checkpoint_save_freq = get_autologging_config(
mlflow.tensorflow.FLAVOR_NAME, "checkpoint_save_freq", "epoch"
)
if not any(isinstance(callback, MlflowModelCheckpointCallback) for callback in callbacks):
callbacks.append(
MlflowModelCheckpointCallback(
monitor=checkpoint_monitor,
mode=checkpoint_mode,
save_best_only=checkpoint_save_best_only,
save_weights_only=checkpoint_save_weights_only,
save_freq=checkpoint_save_freq,
)
)
return callbacks, log_dir
@autologging_integration(FLAVOR_NAME)
def autolog(
log_models=True,
log_datasets=True,
disable=False,
exclusive=False,
disable_for_unsupported_versions=False,
silent=False,
registered_model_name=None,
log_input_examples=False,
log_model_signatures=True,
saved_model_kwargs=None,
keras_model_kwargs=None,
extra_tags=None,
log_every_epoch=True,
log_every_n_steps=None,
checkpoint=True,
checkpoint_monitor="val_loss",
checkpoint_mode="min",
checkpoint_save_best_only=True,
checkpoint_save_weights_only=False,
checkpoint_save_freq="epoch",
):
"""
Enables autologging for ``tf.keras``.
Note that only ``tensorflow>=2.3`` are supported.
As an example, try running the
`Keras/TensorFlow example <https://github.com/mlflow/mlflow/blob/master/examples/keras/train.py>`_.
For each TensorFlow module, autologging captures the following information:
**tf.keras**
- **Metrics** and **Parameters**
- Training and validation loss.
- User-specified metrics.
- Optimizer config, e.g., learning_rate, momentum, etc.
- Training configs, e.g., epochs, batch_size, etc.
- **Artifacts**
- Model summary on training start.
- Saved Keras model in `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ format.
- TensorBoard logs on training end.
**tf.keras.callbacks.EarlyStopping**
- **Metrics** and **Parameters**
- Metrics from the ``EarlyStopping`` callbacks: ``stopped_epoch``, ``restored_epoch``,
``restore_best_weight``, etc
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``:
``min_delta``, ``patience``, ``baseline``, ``restore_best_weights``, etc
Refer to the autologging tracking documentation for more
information on `TensorFlow workflows
<https://www.mlflow.org/docs/latest/tracking.html#tensorflow-and-keras-experimental>`_.
Note that autologging cannot be used together with explicit MLflow callback, i.e.,
`mlflow.tensorflow.MlflowCallback`, because it will cause the same metrics to be logged twice.
If you want to include `mlflow.tensorflow.MlflowCallback` in the callback list, please turn off
autologging by calling `mlflow.tensorflow.autolog(disable=True)`.
Args:
log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
log_datasets: If ``True``, dataset information is logged to MLflow Tracking.
If ``False``, dataset information is not logged.
disable: If ``True``, disables the TensorFlow autologging integration. If ``False``,
enables the TensorFlow integration autologging integration.
exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run,
which may be user-created.
disable_for_unsupported_versions: If ``True``, disable autologging for versions of
tensorflow that have not been tested against this version of the MLflow
client or are incompatible.
silent: If ``True``, suppress all event logs and warnings from MLflow during TensorFlow
autologging. If ``False``, show all events and warnings during TensorFlow
autologging.
registered_model_name: If given, each time a model is trained, it is registered as a
new model version of the registered model with this name.
The registered model is created if it does not already exist.
log_input_examples: If ``True``, input examples from training datasets are collected and
logged along with tf/keras model artifacts during training. If
``False``, input examples are not logged.
log_model_signatures: If ``True``,
:py:class:`ModelSignatures <mlflow.models.ModelSignature>`
describing model inputs and outputs are collected and logged along
with tf/keras model artifacts during training. If ``False``,
signatures are not logged. Note that logging TensorFlow models
with signatures changes their pyfunc inference behavior when
Pandas DataFrames are passed to ``predict()``.
When a signature is present, an ``np.ndarray``
(for single-output models) or a mapping from
``str`` -> ``np.ndarray`` (for multi-output models) is returned;
when a signature is not present, a Pandas DataFrame is returned.
saved_model_kwargs: a dict of kwargs to pass to ``tensorflow.saved_model.save`` method.
keras_model_kwargs: a dict of kwargs to pass to ``keras_model.save`` method.
extra_tags: A dictionary of extra tags to set on each managed run created by autologging.
log_every_epoch: If True, training metrics will be logged at the end of each epoch.
log_every_n_steps: If set, training metrics will be logged every `n` training steps.
`log_every_n_steps` must be `None` when `log_every_epoch=True`.
checkpoint: Enable automatic model checkpointing.
checkpoint_monitor: In automatic model checkpointing, the metric name to monitor if
you set `model_checkpoint_save_best_only` to True.
checkpoint_mode: one of {"min", "max"}. In automatic model checkpointing,
if save_best_only=True, the decision to overwrite the current save file is made based on
either the maximization or the minimization of the monitored quantity.
checkpoint_save_best_only: If True, automatic model checkpointing only saves when
the model is considered the "best" model according to the quantity
monitored and previous checkpoint model is overwritten.
checkpoint_save_weights_only: In automatic model checkpointing, if True, then
only the model's weights will be saved. Otherwise, the optimizer states,
lr-scheduler states, etc are added in the checkpoint too.
checkpoint_save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. Note that if the saving isn't aligned to
epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset
every epoch). Defaults to `"epoch"`.
"""
import tensorflow as tf
if Version(tf.__version__) < Version("2.3"):
_logger.error(
"Could not log to MLflow because your Tensorflow version is below 2.3, detected "
f"version: {tf.__version__}."
)
return
@picklable_exception_safe_function
def _get_early_stop_callback(callbacks):
for callback in callbacks:
if isinstance(callback, tf.keras.callbacks.EarlyStopping):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {
"monitor": callback.monitor,
"min_delta": callback.min_delta,
"patience": callback.patience,
"baseline": callback.baseline,
"restore_best_weights": callback.restore_best_weights,
}
mlflow.log_params(earlystopping_params)
except Exception:
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception:
return None
def _log_early_stop_callback_metrics(callback, history, model_id=None):
from mlflow import log_metrics
if callback is None or not callback.model.stop_training:
return
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, _ = callback_attrs
log_metrics({"stopped_epoch": stopped_epoch}, synchronous=False, model_id=model_id)
if not restore_best_weights or callback.best_weights is None:
return
monitored_metric = history.history.get(callback.monitor)
if not monitored_metric:
return
initial_epoch = history.epoch[0]
# If `monitored_metric` contains multiple best values (e.g. [0.1, 0.1, 0.2] where 0.1 is
# the minimum loss), the epoch corresponding to the first occurrence of the best value is
# the best epoch. In keras > 2.6.0, the best epoch can be obtained via the `best_epoch`
# attribute of an `EarlyStopping` instance: https://github.com/keras-team/keras/pull/15197
restored_epoch = initial_epoch + monitored_metric.index(callback.best)
log_metrics({"restored_epoch": restored_epoch}, synchronous=False, model_id=model_id)
restored_index = history.epoch.index(restored_epoch)
restored_metrics = {
key: metrics[restored_index] for key, metrics in history.history.items()
}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
log_metrics(restored_metrics, stopped_epoch + 1, synchronous=False, model_id=model_id)
def _log_keras_model(history, args, model_id=None):
def _infer_model_signature(input_data_slice):
# In certain TensorFlow versions, calling `predict()` on model may modify
# the `stop_training` attribute, so we save and restore it accordingly
original_stop_training = history.model.stop_training
model_output = history.model.predict(input_data_slice)
history.model.stop_training = original_stop_training
return infer_signature(input_data_slice, model_output)
from mlflow.tensorflow.autologging import extract_tf_keras_input_example
def _get_tf_keras_input_example_slice():
input_training_data = args[0]
keras_input_example_slice = extract_tf_keras_input_example(input_training_data)
if keras_input_example_slice is None:
raise MlflowException(
"Cannot log input example or model signature for input with type"
f" {type(input_training_data)}. TensorFlow Keras autologging can"
" only log input examples and model signatures for the following"
" input types: numpy.ndarray, dict[string -> numpy.ndarray],"
" tensorflow.keras.utils.Sequence, and"
" tensorflow.data.Dataset (TensorFlow >= 2.1.0 required)",
INVALID_PARAMETER_VALUE,
)
return keras_input_example_slice
input_example, signature = resolve_input_example_and_signature(
_get_tf_keras_input_example_slice,
_infer_model_signature,
log_input_examples,
log_model_signatures,
_logger,
)
log_model(
history.model,
"model",
input_example=input_example,
signature=signature,
registered_model_name=get_autologging_config(
FLAVOR_NAME, "registered_model_name", None
),
saved_model_kwargs=saved_model_kwargs,
keras_model_kwargs=keras_model_kwargs,
model_id=model_id,
)
def _patched_inference(original, inst, *args, **kwargs):
log_dir = None
try:
unlogged_params = ["self", "x", "y", "callbacks", "validation_data", "verbose"]
batch_size = None
try:
is_single_input_model = isinstance(inst.input_shape, tuple)
training_data = kwargs["x"] if "x" in kwargs else args[0]
if isinstance(training_data, tf.data.Dataset) and hasattr(
training_data, "_batch_size"
):
batch_size = training_data._batch_size.numpy()
elif isinstance(training_data, tf.keras.utils.Sequence):
first_batch_inputs, *_ = training_data[0]
if is_single_input_model:
batch_size = len(first_batch_inputs)
else:
batch_size = len(first_batch_inputs[0])
elif is_iterator(training_data):
peek = next(training_data)
batch_size = len(peek[0]) if is_single_input_model else len(peek[0][0])
def __restore_generator(prev_generator):
yield peek
yield from prev_generator
restored_generator = __restore_generator(training_data)
if "x" in kwargs:
kwargs["x"] = restored_generator
else:
args = (restored_generator,) + args[1:]
except Exception as e:
_logger.warning(
"Encountered unexpected error while inferring batch size from training"
" dataset: %s",
e,
)
if batch_size is not None:
mlflow.log_param("batch_size", batch_size)
unlogged_params.append("batch_size")
log_fn_args_as_params(original, args, kwargs, unlogged_params)
# Check if the 'callback' argument of fit() is set positionally
if len(args) >= 6:
# Convert the positional training function arguments to a list in order to
# mutate the contents
args = list(args)
# Make a shallow copy of the preexisting callbacks to avoid permanently
# modifying their contents for future training invocations. Introduce
# TensorBoard & tf.keras callbacks if necessary
callbacks = list(args[5])
callbacks, log_dir = _setup_callbacks(
callbacks,
log_every_epoch=log_every_epoch,
log_every_n_steps=log_every_n_steps,
)
# Replace the callbacks positional entry in the copied arguments and convert
# the arguments back to tuple form for usage in the training function
args[5] = callbacks
args = tuple(args)
else:
# Make a shallow copy of the preexisting callbacks and introduce TensorBoard
# & tf.keras callbacks if necessary
callbacks = list(kwargs.get("callbacks") or [])
kwargs["callbacks"], log_dir = _setup_callbacks(
callbacks,
log_every_epoch=log_every_epoch,
log_every_n_steps=log_every_n_steps,
)
early_stop_callback = _get_early_stop_callback(callbacks)
_log_early_stop_callback_params(early_stop_callback)
model_id = None
if log_models:
model_id = _initialize_logged_model("model", flavor=FLAVOR_NAME).model_id
if log_datasets:
try:
context_tags = context_registry.resolve_tags()
source = CodeDatasetSource(tags=context_tags)
x = kwargs["x"] if "x" in kwargs else args[0]
if "y" in kwargs:
y = kwargs["y"]
elif len(args) >= 2:
y = args[1]
else:
y = None
if "validation_data" in kwargs:
validation_data = kwargs["validation_data"]
elif len(args) >= 8:
validation_data = args[7]
else:
validation_data = None
_log_tensorflow_dataset(x, source, "train", targets=y, model_id=model_id)
if validation_data is not None:
_log_tensorflow_dataset(validation_data, source, "eval", model_id=model_id)
except Exception as e:
_logger.warning(
"Failed to log training dataset information to MLflow Tracking. Reason: %s",
e,
)
history = original(inst, *args, **kwargs)
if log_models:
_log_keras_model(history, args, model_id=model_id)
_log_early_stop_callback_metrics(
callback=early_stop_callback,
history=history,
model_id=model_id,
)
# Ensure all data are logged.
# Shut down the async logging (instead of flushing)
# to avoid leaving zombie threads between patchings.
_shut_down_async_logging()
mlflow.log_artifacts(
local_dir=log_dir.location,
artifact_path="tensorboard_logs",
)
if log_dir.is_temp:
shutil.rmtree(log_dir.location)
return history
except (Exception, KeyboardInterrupt) as e:
try:
if log_dir is not None and log_dir.is_temp and os.path.exists(log_dir.location):
shutil.rmtree(log_dir.location)
finally:
# Regardless of what happens during the `_on_exception` callback, reraise
# the original implementation exception once the callback completes
raise e
safe_patch(
FLAVOR_NAME,
tf.keras.Model,
"fit",
_patched_inference,
manage_run=True,
extra_tags=extra_tags,
)
def _log_tensorflow_dataset(
tensorflow_dataset, source, context, name=None, targets=None, model_id=None
):
import tensorflow as tf
# create a dataset
if isinstance(tensorflow_dataset, np.ndarray):
dataset = from_numpy(features=tensorflow_dataset, targets=targets, source=source, name=name)
elif isinstance(tensorflow_dataset, tf.Tensor):
dataset = from_tensorflow(
features=tensorflow_dataset, targets=targets, source=source, name=name
)
elif isinstance(tensorflow_dataset, tf.data.Dataset):
dataset = from_tensorflow(features=tensorflow_dataset, source=source, name=name)
elif isinstance(tensorflow_dataset, tuple):
x = tensorflow_dataset[0]
y = tensorflow_dataset[1]
# check if x and y are tensors
if isinstance(x, tf.Tensor) and isinstance(y, tf.Tensor):
dataset = from_tensorflow(features=x, source=source, targets=y, name=name)
else:
dataset = from_numpy(features=x, targets=y, source=source, name=name)
else:
_logger.warning(
"Unrecognized dataset type %s. Dataset logging skipped.", type(tensorflow_dataset)
)
return
model = None if model_id is None else LoggedModelInput(model_id=model_id)
mlflow.log_input(dataset, context, model=model)
def load_checkpoint(model=None, run_id=None, epoch=None, global_step=None):
"""
If you enable "checkpoint" in autologging, during Keras model
training execution, checkpointed models are logged as MLflow artifacts.
Using this API, you can load the checkpointed model.
If you want to load the latest checkpoint, set both `epoch` and `global_step` to None.
If "checkpoint_save_freq" is set to "epoch" in autologging,
you can set `epoch` param to the epoch of the checkpoint to load specific epoch checkpoint.
If "checkpoint_save_freq" is set to an integer in autologging,
you can set `global_step` param to the global step of the checkpoint to load specific
global step checkpoint.
`epoch` param and `global_step` can't be set together.
Args:
model: A Keras model, this argument is required
only when the saved checkpoint is "weight-only".
run_id: The id of the run which model is logged to. If not provided,
current active run is used.
epoch: The epoch of the checkpoint to be loaded, if you set
"checkpoint_save_freq" to "epoch".
global_step: The global step of the checkpoint to be loaded, if
you set "checkpoint_save_freq" to an integer.
Returns:
The instance of a Keras model restored from the specified checkpoint.
.. code-block:: python
:caption: Example
import mlflow
mlflow.tensorflow.autolog(checkpoint=True, checkpoint_save_best_only=False)
model = create_tf_keras_model() # Create a Keras model
with mlflow.start_run() as run:
model.fit(data, label, epoch=10)
run_id = run.info.run_id
# load latest checkpoint model
latest_checkpoint_model = mlflow.tensorflow.load_checkpoint(run_id=run_id)
# load history checkpoint model logged in second epoch
checkpoint_model = mlflow.tensorflow.load_checkpoint(run_id=run_id, epoch=2)
"""
import tensorflow as tf
with TempDir() as tmp_dir:
downloaded_checkpoint_filepath = download_checkpoint_artifact(
run_id=run_id, epoch=epoch, global_step=global_step, dst_path=tmp_dir.path()
)
fname = os.path.splitext(downloaded_checkpoint_filepath)[0]
if fname.endswith(_WEIGHT_ONLY_CHECKPOINT_SUFFIX):
# the model is saved as weights only
if model is None:
raise MlflowException(
"The latest checkpoint is weights-only, 'model' argument must be provided"
)
model.load_weights(downloaded_checkpoint_filepath)
return model
return tf.keras.models.load_model(downloaded_checkpoint_filepath)
| _TensorBoardLogDir |
python | openai__gym | tests/utils/test_play.py | {
"start": 567,
"end": 799
} | class ____(gym.Wrapper):
def __init__(self, env, keys_to_action):
super().__init__(env)
self.keys_to_action = keys_to_action
def get_keys_to_action(self):
return self.keys_to_action
| KeysToActionWrapper |
python | pdm-project__pdm | src/pdm/pytest.py | {
"start": 1712,
"end": 3711
} | class ____(httpx.BaseTransport):
"""
A local file transport for HTTPX.
Allows to mock some HTTP requests with some local files
"""
def __init__(
self,
aliases: dict[str, Path],
overrides: IndexOverrides | None = None,
strip_suffix: bool = False,
):
super().__init__()
self.aliases = sorted(aliases.items(), key=lambda item: len(item[0]), reverse=True)
self.overrides = overrides if overrides is not None else {}
self.strip_suffix = strip_suffix
def get_file_path(self, path: str) -> Path | None:
for prefix, base_path in self.aliases:
if path.startswith(prefix):
file_path = base_path / path[len(prefix) :].lstrip("/")
if not self.strip_suffix:
return file_path
return next(
(p for p in file_path.parent.iterdir() if p.stem == file_path.name),
None,
)
return None
def handle_request(self, request: httpx.Request) -> httpx.Response:
request_path = request.url.path
file_path = self.get_file_path(request_path)
headers: dict[str, str] = {}
stream: httpx.SyncByteStream | None = None
content: bytes | None = None
if request_path in self.overrides:
status_code = 200
content = self.overrides[request_path]
headers["Content-Type"] = "text/html"
elif file_path is None or not file_path.exists():
status_code = 404
else:
status_code = 200
stream = FileByteStream(file_path.open("rb"))
if file_path.suffix == ".html":
headers["Content-Type"] = "text/html"
elif file_path.suffix == ".json":
headers["Content-Type"] = "application/vnd.pypi.simple.v1+json"
return httpx.Response(status_code, headers=headers, content=content, stream=stream)
| LocalIndexTransport |
python | pytorch__pytorch | torch/nn/modules/conv.py | {
"start": 66593,
"end": 69489
} | class ____(_LazyConvXdMixin, Conv3d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv3d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`Conv3d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
.. seealso:: :class:`torch.nn.Conv3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# super class define this variable as None. "type: ignore[..] is required
# since we are redefining the variable.
cls_to_become = Conv3d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
# pyrefly: ignore [bad-argument-type]
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs,
)
# pyrefly: ignore [bad-override, bad-argument-type]
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
# pyrefly: ignore [bad-override, bad-argument-type]
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 3
# LazyConvTranspose1d defines weight as a Tensor but derived class defines it as UninitializeParameter
| LazyConv3d |
python | kamyu104__LeetCode-Solutions | Python/counting-words-with-a-given-prefix.py | {
"start": 42,
"end": 259
} | class ____(object):
def prefixCount(self, words, pref):
"""
:type words: List[str]
:type pref: str
:rtype: int
"""
return sum(x.startswith(pref) for x in words)
| Solution |
python | getsentry__sentry | src/sentry_plugins/twilio/client.py | {
"start": 123,
"end": 1089
} | class ____(ApiClient):
plugin_name = "twilio"
allow_redirects = False
twilio_messages_endpoint = "https://api.twilio.com/2010-04-01/Accounts/{0}/Messages.json"
def __init__(self, account_sid, auth_token, sms_from, sms_to):
self.account_sid = account_sid
self.auth_token = auth_token
self.sms_from = sms_from
self.sms_to = sms_to
super().__init__()
def basic_auth(self, user, password):
return b"Basic " + b64encode(force_bytes(user + ":" + password))
def request(self, data):
endpoint = self.twilio_messages_endpoint.format(self.account_sid)
headers = {"Authorization": self.basic_auth(self.account_sid, self.auth_token)}
# Twilio doesn't accept the json headers, so set this to False
# https://www.twilio.com/docs/usage/your-request-to-twilio#post
return self._request(path=endpoint, method="post", data=data, headers=headers, json=False)
| TwilioApiClient |
python | milvus-io__pymilvus | tests/test_bulk_writer_stage.py | {
"start": 6013,
"end": 7994
} | class ____:
"""Test StageManager class."""
@pytest.fixture
def stage_manager(self) -> StageManager:
"""Create a StageManager instance."""
return StageManager(
cloud_endpoint="https://api.cloud.zilliz.com",
api_key="test_api_key",
)
@patch("pymilvus.bulk_writer.stage_manager.create_stage")
def test_create_stage(self, mock_create: Mock, stage_manager: StageManager) -> None:
"""Test creating a stage."""
stage_manager.create_stage(
project_id="test_project",
region_id="us-west-2",
stage_name="test_stage",
)
mock_create.assert_called_once_with(
stage_manager.cloud_endpoint,
stage_manager.api_key,
"test_project",
"us-west-2",
"test_stage",
)
@patch("pymilvus.bulk_writer.stage_manager.delete_stage")
def test_delete_stage(self, mock_delete: Mock, stage_manager: StageManager) -> None:
"""Test deleting a stage."""
stage_manager.delete_stage(stage_name="test_stage")
mock_delete.assert_called_once_with(
stage_manager.cloud_endpoint,
stage_manager.api_key,
"test_stage",
)
@patch("pymilvus.bulk_writer.stage_manager.list_stages")
def test_list_stages(self, mock_list: Mock, stage_manager: StageManager) -> None:
"""Test listing stages."""
mock_response = Mock()
mock_response.json.return_value = {"data": {"stages": ["stage1", "stage2"]}}
mock_list.return_value = mock_response
result = stage_manager.list_stages(project_id="test_project", current_page=1, page_size=10)
assert result.json()["data"]["stages"] == ["stage1", "stage2"]
mock_list.assert_called_once_with(
stage_manager.cloud_endpoint,
stage_manager.api_key,
"test_project",
1,
10,
)
| TestStageManager |
python | walkccc__LeetCode | solutions/3247. Number of Subsequences with Odd Sum/3247.py | {
"start": 0,
"end": 652
} | class ____:
def subsequenceCount(self, nums: list[int]) -> int:
MOD = 1_000_000_007
even = 0 # the number of subsequences with even sum
odd = 0 # the number of subsequences with odd sum
for num in nums:
if num % 2 == 0:
# Appending an even number to a subsequence doesn't change the parity.
# The even number itself is also a valid subsequence.
even, odd = even + even + 1, odd + odd
else:
# Appending an odd number to a subsequence changes the parity.
# The odd number itself is also a valid subsequence.
even, odd = even + odd, odd + even + 1
return odd % MOD
| Solution |
python | tornadoweb__tornado | tornado/gen.py | {
"start": 24376,
"end": 25823
} | class ____:
"""_NullFuture resembles a Future that finished with a result of None.
It's not actually a `Future` to avoid depending on a particular event loop.
Handled as a special case in the coroutine runner.
We lie and tell the type checker that a _NullFuture is a Future so
we don't have to leak _NullFuture into lots of public APIs. But
this means that the type checker can't warn us when we're passing
a _NullFuture into a code path that doesn't understand what to do
with it.
"""
def result(self) -> None:
return None
def done(self) -> bool:
return True
# _null_future is used as a dummy value in the coroutine runner. It differs
# from moment in that moment always adds a delay of one IOLoop iteration
# while _null_future is processed as soon as possible.
_null_future = typing.cast(Future, _NullFuture())
moment = typing.cast(Future, _NullFuture())
moment.__doc__ = """A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
In native coroutines, the equivalent of ``yield gen.moment`` is
``await asyncio.sleep(0)``.
.. versionadded:: 4.0
.. deprecated:: 4.5
``yield None`` (or ``yield`` with no argument) is now equivalent to
``yield gen.moment``.
"""
| _NullFuture |
python | h5py__h5py | h5py/tests/test_h5z.py | {
"start": 508,
"end": 1972
} | class ____(Structure):
"""H5Z_class2_t structure defining a filter"""
_fields_ = [
("version", c_int),
("id_", c_int),
("encoder_present", c_uint),
("decoder_present", c_uint),
("name", c_char_p),
("can_apply", c_void_p),
("set_local", c_void_p),
("filter_", H5ZFuncT),
]
@pytest.mark.thread_unsafe(reason="fixed filter_id")
def test_register_filter():
filter_id = 256 # Test ID
@H5ZFuncT
def failing_filter_callback(flags, cd_nelemts, cd_values, nbytes, buf_size, buf):
return 0
dummy_filter_class = H5ZClass2T(
version=h5z.CLASS_T_VERS,
id_=filter_id,
encoder_present=1,
decoder_present=1,
name=b"dummy filter",
can_apply=None,
set_local=None,
filter_=failing_filter_callback,
)
h5z.register_filter(addressof(dummy_filter_class))
try:
assert h5z.filter_avail(filter_id)
filter_flags = h5z.get_filter_info(filter_id)
assert (
filter_flags
== h5z.FILTER_CONFIG_ENCODE_ENABLED | h5z.FILTER_CONFIG_DECODE_ENABLED
)
finally:
h5z.unregister_filter(filter_id)
assert not h5z.filter_avail(filter_id)
@pytest.mark.mpi_skip
@insubprocess
def test_unregister_filter(request):
if h5py.h5z.filter_avail(h5py.h5z.FILTER_LZF):
res = h5py.h5z.unregister_filter(h5py.h5z.FILTER_LZF)
assert res
| H5ZClass2T |
python | django-extensions__django-extensions | tests/test_sqldiff.py | {
"start": 544,
"end": 6392
} | class ____(TestCase):
def setUp(self):
self.parser = Command().create_parser("test", "sqldiff")
self.args = ["-a"]
self.options = self.parser.parse_args(args=self.args)
self.tmp_out = StringIO()
self.tmp_err = StringIO()
def _include_proxy_models_testing(self, should_include_proxy_models): # type: (bool) -> ()
if should_include_proxy_models:
self.args.append("--include-proxy-models")
self.options = self.parser.parse_args(args=self.args)
instance = SqliteSQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
instance.load()
instance.find_differences()
checked_models = {
"%s.%s" % (app_label, model_name)
for app_label, model_name, _ in instance.differences
}
self.assertEqual(
should_include_proxy_models,
"testapp.PostWithTitleOrdering" in checked_models,
)
@pytest.mark.skipif(
settings.DATABASES["default"]["ENGINE"] != "django.db.backends.sqlite3",
reason="Test can only run on sqlite3",
)
def test_sql_diff_without_proxy_models(self):
self._include_proxy_models_testing(False)
@pytest.mark.skipif(
settings.DATABASES["default"]["ENGINE"] != "django.db.backends.sqlite3",
reason="Test can only run on sqlite3",
)
def test_sql_diff_with_proxy_models(self):
self._include_proxy_models_testing(True)
def test_format_field_names(self):
instance = MySQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
expected_field_name = ["name", "email", "address"]
self.assertEqual(
instance.format_field_names(["Name", "EMAIL", "aDDress"]),
expected_field_name,
)
def test_get_index_together(self):
instance = MySQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
self.assertEqual(
instance.get_index_together(SqlDiff._meta), [("number", "creator")]
)
self.assertEqual(
instance.get_index_together(SqlDiffIndexes._meta), [("first", "second")]
)
def test_get_unique_together(self):
instance = MySQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
self.assertEqual(
instance.get_unique_together(SluggedWithUniqueTogetherTestModel._meta),
[("slug", "category")],
)
self.assertEqual(
instance.get_unique_together(RandomCharTestModelUniqueTogether._meta),
[("random_char_field", "common_field")],
)
self.assertEqual(
instance.get_unique_together(SqlDiffUniqueTogether._meta), [("aaa", "bbb")]
)
self.assertEqual(
instance.get_unique_together(PostWithUniqField._meta),
[("common_field", "uniq_field")],
)
@pytest.mark.skipif(
settings.DATABASES["default"]["ENGINE"] != "django.db.backends.mysql",
reason="Test can only run on mysql",
)
def test_mysql_to_dict(self):
mysql_instance = MySQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
mysql_dict = mysql_instance.sql_to_dict(
"""select 1 as "foo", 1 + 1 as "BAR";""", []
)
self.assertEqual(mysql_dict, [{"bar": 2, "foo": 1}])
@pytest.mark.skipif(
settings.DATABASES["default"]["ENGINE"] != "django.db.backends.mysql",
reason="Test can only run on mysql",
)
@mock.patch(
"django_extensions.management.commands.sqldiff.MySQLDiff.format_field_names"
)
def test_invalid_mysql_to_dict(self, format_field_names):
format_field_names.side_effect = lambda x: x
mysql_instance = MySQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
mysql_dict = mysql_instance.sql_to_dict(
"""select 1 as "foo", 1 + 1 as "BAR";""", []
)
self.assertNotEquals(mysql_dict, [{"bar": 2, "foo": 1}])
@pytest.mark.skipif(
settings.DATABASES["default"]["ENGINE"] != "django.db.backends.sqlite3",
reason="Test can only run on sqlite3",
)
def test_sqlite_to_dict(self):
sqlite_instance = SqliteSQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
sqlite_dict = sqlite_instance.sql_to_dict(
"""select 1 as "foo", 1 + 1 as "BAR";""", []
)
self.assertEqual(sqlite_dict, [{"BAR": 2, "foo": 1}])
@pytest.mark.skipif(
settings.DATABASES["default"]["ENGINE"] != "django.db.backends.postgresql",
reason="Test can only run on postgresql",
)
def test_postgresql_to_dict(self):
postgresql_instance = PostgresqlSQLDiff(
apps.get_models(include_auto_created=True),
vars(self.options),
stdout=self.tmp_out,
stderr=self.tmp_err,
)
postgresql_dict = postgresql_instance.sql_to_dict(
"""select 1 as "foo", 1 + 1 as "BAR";""", []
)
self.assertEqual(postgresql_dict, [{"BAR": 2, "foo": 1}])
| SqlDiffTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass4.py | {
"start": 634,
"end": 705
} | class ____:
aa: C1
bb: C2 = C2()
cc: C3 = C3()
@dataclass
| DC3 |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 2495,
"end": 2573
} | class ____(models.Model):
base_name = models.CharField(max_length=100)
| BaseM |
python | run-llama__llama_index | llama-index-core/llama_index/core/output_parsers/selection.py | {
"start": 808,
"end": 3360
} | class ____(BaseOutputParser):
REQUIRED_KEYS = frozenset(Answer.__annotations__)
def _filter_dict(self, json_dict: dict) -> dict:
"""Filter recursively until a dictionary matches all REQUIRED_KEYS."""
output_dict = json_dict
for key, val in json_dict.items():
if key in self.REQUIRED_KEYS:
continue
elif isinstance(val, dict):
output_dict = self._filter_dict(val)
elif isinstance(val, list):
for item in val:
if isinstance(item, dict):
output_dict = self._filter_dict(item)
return output_dict
def _format_output(self, output: List[dict]) -> List[dict]:
output_json = []
for json_dict in output:
valid = True
for key in self.REQUIRED_KEYS:
if key not in json_dict:
valid = False
break
if not valid:
json_dict = self._filter_dict(json_dict)
output_json.append(json_dict)
return output_json
def parse(self, output: str) -> Any:
json_string = _marshal_llm_to_json(output)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
import yaml
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
if isinstance(json_obj, dict):
json_obj = [json_obj]
if not isinstance(json_obj, list):
raise ValueError(f"Failed to convert output to JSON: {output!r}")
json_output = self._format_output(json_obj)
answers = [Answer.from_dict(json_dict) for json_dict in json_output]
return StructuredOutput(raw_output=output, parsed_output=answers)
def format(self, prompt_template: str) -> str:
return prompt_template + "\n\n" + _escape_curly_braces(FORMAT_STR)
| SelectionOutputParser |
python | catalyst-team__catalyst | catalyst/callbacks/metrics/recsys.py | {
"start": 12141,
"end": 16186
} | class ____(BatchMetricCallback):
"""NDCG metric callback.
Computes NDCG@topk for the specified values of `topk`.
Args:
input_key: input key to use for metric calculation, specifies our `y_pred`
target_key: output key to use for metric calculation, specifies our `y_true`
prefix: key for the metric's name
topk: specifies which NDCG@K to log
log_on_batch: boolean flag to log computed metrics every batch
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_users, num_features, num_items = int(1e4), int(1e1), 10
X = torch.rand(num_users, num_features)
y = (torch.rand(num_users, num_items) > 0.5).to(torch.float32)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_items)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
num_epochs=3,
verbose=True,
callbacks=[
dl.BatchTransformCallback(
transform=torch.sigmoid,
scope="on_batch_end",
input_key="logits",
output_key="scores"
),
dl.CriterionCallback(
input_key="logits", target_key="targets", metric_key="loss"
),
dl.AUCCallback(input_key="scores", target_key="targets"),
dl.HitrateCallback(
input_key="scores", target_key="targets", topk=(1, 3, 5)
),
dl.MRRCallback(input_key="scores", target_key="targets", topk=(1, 3, 5)),
dl.MAPCallback(input_key="scores", target_key="targets", topk=(1, 3, 5)),
dl.NDCGCallback(input_key="scores", target_key="targets", topk=(1, 3)),
dl.OptimizerCallback(metric_key="loss"),
dl.SchedulerCallback(),
dl.CheckpointCallback(
logdir="./logs", loader_key="valid", metric_key="loss", minimize=True
),
]
)
.. note::
Metric names depending on input parameters:
- ``topk = (1,) or None`` ---> ``"ndcg01"``
- ``topk = (1, 3)`` ---> ``"ndcg01"``, ``"ndcg03"``
- ``topk = (1, 3, 5)`` ---> ``"ndcg01"``, ``"ndcg03"``, ``"ndcg05"``
You can find them in ``runner.batch_metrics``, ``runner.loader_metrics`` or
``runner.epoch_metrics``.
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
topk: Iterable[int] = None,
log_on_batch: bool = True,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=NDCGMetric(topk=topk, prefix=prefix, suffix=suffix),
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
__all__ = ["HitrateCallback", "MAPCallback", "MRRCallback", "NDCGCallback"]
| NDCGCallback |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 840935,
"end": 841325
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("ProjectColumn", graphql_name="node")
"""The item at the end of the edge."""
| ProjectColumnEdge |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-qdrant/destination_qdrant/config.py | {
"start": 525,
"end": 1879
} | class ____(BaseModel):
url: str = Field(..., title="Public Endpoint", description="Public Endpoint of the Qdrant cluser", order=0)
auth_method: Union[ApiKeyAuth, NoAuth] = Field(
default="api_key_auth",
title="Authentication Method",
description="Method to authenticate with the Qdrant Instance",
discriminator="mode",
type="object",
order=1,
)
prefer_grpc: bool = Field(
title="Prefer gRPC", description="Whether to prefer gRPC over HTTP. Set to true for Qdrant cloud clusters", default=True
)
collection: str = Field(..., title="Collection Name", description="The collection to load data into", order=2)
distance_metric: str = Field(
default="cos",
title="Distance Metric",
enum=["dot", "cos", "euc"],
description="The Distance metric used to measure similarities among vectors. This field is only used if the collection defined in the does not exist yet and is created automatically by the connector.",
)
text_field: str = Field(title="Text Field", description="The field in the payload that contains the embedded text", default="text")
class Config:
title = "Indexing"
schema_extra = {
"group": "Indexing",
"description": "Indexing configuration",
}
| QdrantIndexingConfigModel |
python | django__django | tests/order_with_respect_to/base_tests.py | {
"start": 194,
"end": 11561
} | class ____:
databases = {"default", "other"}
# Hook to allow subclasses to run these tests with alternate models.
Answer = None
Post = None
Question = None
@classmethod
def setUpTestData(cls):
cls.q1 = cls.Question.objects.create(
text="Which Beatle starts with the letter 'R'?"
)
cls.Answer.objects.create(text="John", question=cls.q1)
cls.Answer.objects.create(text="Paul", question=cls.q1)
cls.Answer.objects.create(text="George", question=cls.q1)
cls.Answer.objects.create(text="Ringo", question=cls.q1)
def test_default_to_insertion_order(self):
# Answers will always be ordered in the order they were inserted.
self.assertQuerySetEqual(
self.q1.answer_set.all(),
[
"John",
"Paul",
"George",
"Ringo",
],
attrgetter("text"),
)
def test_previous_and_next_in_order(self):
# We can retrieve the answers related to a particular object, in the
# order they were created, once we have a particular object.
a1 = self.q1.answer_set.all()[0]
self.assertEqual(a1.text, "John")
self.assertEqual(a1.get_next_in_order().text, "Paul")
a2 = list(self.q1.answer_set.all())[-1]
self.assertEqual(a2.text, "Ringo")
self.assertEqual(a2.get_previous_in_order().text, "George")
def test_item_ordering(self):
# We can retrieve the ordering of the queryset from a particular item.
a1 = self.q1.answer_set.all()[1]
id_list = [o.pk for o in self.q1.answer_set.all()]
self.assertSequenceEqual(a1.question.get_answer_order(), id_list)
# It doesn't matter which answer we use to check the order, it will
# always be the same.
a2 = self.Answer.objects.create(text="Number five", question=self.q1)
self.assertEqual(
list(a1.question.get_answer_order()), list(a2.question.get_answer_order())
)
def test_set_order_unrelated_object(self):
"""An answer that's not related isn't updated."""
q = self.Question.objects.create(text="other")
a = self.Answer.objects.create(text="Number five", question=q)
self.q1.set_answer_order([o.pk for o in self.q1.answer_set.all()] + [a.pk])
self.assertEqual(self.Answer.objects.get(pk=a.pk)._order, 0)
def test_change_ordering(self):
# The ordering can be altered
a = self.Answer.objects.create(text="Number five", question=self.q1)
# Swap the last two items in the order list
id_list = [o.pk for o in self.q1.answer_set.all()]
x = id_list.pop()
id_list.insert(-1, x)
# By default, the ordering is different from the swapped version
self.assertNotEqual(list(a.question.get_answer_order()), id_list)
# Change the ordering to the swapped version -
# this changes the ordering of the queryset.
a.question.set_answer_order(id_list)
self.assertQuerySetEqual(
self.q1.answer_set.all(),
["John", "Paul", "George", "Number five", "Ringo"],
attrgetter("text"),
)
def test_recursive_ordering(self):
p1 = self.Post.objects.create(title="1")
p2 = self.Post.objects.create(title="2")
p1_1 = self.Post.objects.create(title="1.1", parent=p1)
p1_2 = self.Post.objects.create(title="1.2", parent=p1)
self.Post.objects.create(title="2.1", parent=p2)
p1_3 = self.Post.objects.create(title="1.3", parent=p1)
self.assertSequenceEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
def test_delete_and_insert(self):
q1 = self.Question.objects.create(text="What is your favorite color?")
q2 = self.Question.objects.create(text="What color is it?")
a1 = self.Answer.objects.create(text="Blue", question=q1)
a2 = self.Answer.objects.create(text="Red", question=q1)
a3 = self.Answer.objects.create(text="Green", question=q1)
a4 = self.Answer.objects.create(text="Yellow", question=q1)
self.assertSequenceEqual(q1.answer_set.all(), [a1, a2, a3, a4])
a3.question = q2
a3.save()
a1.delete()
new_answer = self.Answer.objects.create(text="Black", question=q1)
self.assertSequenceEqual(q1.answer_set.all(), [a2, a4, new_answer])
def test_database_routing(self):
class WriteToOtherRouter:
def db_for_write(self, model, **hints):
return "other"
with self.settings(DATABASE_ROUTERS=[WriteToOtherRouter()]):
with (
self.assertNumQueries(0, using="default"),
self.assertNumQueries(
1,
using="other",
),
):
self.q1.set_answer_order([3, 1, 2, 4])
def test_bulk_create_with_empty_parent(self):
"""
bulk_create() should properly set _order when parent has no existing
children.
"""
question = self.Question.objects.create(text="Test Question")
answers = [self.Answer(question=question, text=f"Answer {i}") for i in range(3)]
answer0, answer1, answer2 = self.Answer.objects.bulk_create(answers)
self.assertEqual(answer0._order, 0)
self.assertEqual(answer1._order, 1)
self.assertEqual(answer2._order, 2)
def test_bulk_create_with_existing_children(self):
"""
bulk_create() should continue _order sequence from existing children.
"""
question = self.Question.objects.create(text="Test Question")
self.Answer.objects.create(question=question, text="Existing 0")
self.Answer.objects.create(question=question, text="Existing 1")
new_answers = [
self.Answer(question=question, text=f"New Answer {i}") for i in range(2)
]
answer2, answer3 = self.Answer.objects.bulk_create(new_answers)
self.assertEqual(answer2._order, 2)
self.assertEqual(answer3._order, 3)
def test_bulk_create_multiple_parents(self):
"""
bulk_create() should maintain separate _order sequences for different
parents.
"""
question0 = self.Question.objects.create(text="Question 0")
question1 = self.Question.objects.create(text="Question 1")
answers = [
self.Answer(question=question0, text="Q0 Answer 0"),
self.Answer(question=question1, text="Q1 Answer 0"),
self.Answer(question=question0, text="Q0 Answer 1"),
self.Answer(question=question1, text="Q1 Answer 1"),
]
created_answers = self.Answer.objects.bulk_create(answers)
answer_q0_0, answer_q1_0, answer_q0_1, answer_q1_1 = created_answers
self.assertEqual(answer_q0_0._order, 0)
self.assertEqual(answer_q0_1._order, 1)
self.assertEqual(answer_q1_0._order, 0)
self.assertEqual(answer_q1_1._order, 1)
def test_bulk_create_mixed_scenario(self):
"""
The _order field should be correctly set for new Answer objects based
on the count of existing Answers for each related Question.
"""
question0 = self.Question.objects.create(text="Question 0")
question1 = self.Question.objects.create(text="Question 1")
self.Answer.objects.create(question=question1, text="Q1 Existing 0")
self.Answer.objects.create(question=question1, text="Q1 Existing 1")
new_answers = [
self.Answer(question=question0, text="Q0 New 0"),
self.Answer(question=question1, text="Q1 New 0"),
self.Answer(question=question0, text="Q0 New 1"),
]
created_answers = self.Answer.objects.bulk_create(new_answers)
answer_q0_0, answer_q1_2, answer_q0_1 = created_answers
self.assertEqual(answer_q0_0._order, 0)
self.assertEqual(answer_q0_1._order, 1)
self.assertEqual(answer_q1_2._order, 2)
def test_bulk_create_respects_mixed_manual_order(self):
"""
bulk_create() should assign _order automatically only for instances
where it is not manually set. Mixed objects with and without _order
should result in expected final order values.
"""
question_a = self.Question.objects.create(text="Question A")
question_b = self.Question.objects.create(text="Question B")
# Existing answers to push initial _order forward.
self.Answer.objects.create(question=question_a, text="Q-A Existing 0")
self.Answer.objects.create(question=question_b, text="Q-B Existing 0")
self.Answer.objects.create(question=question_b, text="Q-B Existing 1")
answers = [
self.Answer(question=question_a, text="Q-A Manual 4", _order=4),
self.Answer(question=question_b, text="Q-B Auto 2"),
self.Answer(question=question_a, text="Q-A Auto"),
self.Answer(question=question_b, text="Q-B Manual 10", _order=10),
self.Answer(question=question_a, text="Q-A Manual 7", _order=7),
self.Answer(question=question_b, text="Q-B Auto 3"),
]
created_answers = self.Answer.objects.bulk_create(answers)
(
qa_manual_4,
qb_auto_2,
qa_auto,
qb_manual_10,
qa_manual_7,
qb_auto_3,
) = created_answers
# Manual values should stay untouched.
self.assertEqual(qa_manual_4._order, 4)
self.assertEqual(qb_manual_10._order, 10)
self.assertEqual(qa_manual_7._order, 7)
# Existing max was 0 → auto should get _order=1.
self.assertEqual(qa_auto._order, 1)
# Existing max was 1 → next auto gets 2, then 3 (manual 10 is skipped).
self.assertEqual(qb_auto_2._order, 2)
self.assertEqual(qb_auto_3._order, 3)
def test_bulk_create_allows_duplicate_order_values(self):
"""
bulk_create() should allow duplicate _order values if the model
does not enforce uniqueness on the _order field.
"""
question = self.Question.objects.create(text="Duplicated Test")
# Existing answer to set initial _order=0.
self.Answer.objects.create(question=question, text="Existing Answer")
# Two manually set _order=1 and one auto (which may also be assigned
# 1).
answers = [
self.Answer(question=question, text="Manual Order 1", _order=1),
self.Answer(question=question, text="Auto Order 1"),
self.Answer(question=question, text="Auto Order 2"),
self.Answer(question=question, text="Manual Order 1 Duplicate", _order=1),
]
created_answers = self.Answer.objects.bulk_create(answers)
manual_1, auto_1, auto_2, manual_2 = created_answers
# Manual values are as assigned, even if duplicated.
self.assertEqual(manual_1._order, 1)
self.assertEqual(manual_2._order, 1)
# Auto-assigned orders may also use 1 or any value, depending on
# implementation. If no collision logic, they may overlap with manual
# values.
self.assertEqual(auto_1._order, 1)
self.assertEqual(auto_2._order, 2)
| BaseOrderWithRespectToTests |
python | huggingface__transformers | src/transformers/models/vit_msn/modeling_vit_msn.py | {
"start": 8499,
"end": 10890
} | class ____(nn.Module):
def __init__(self, config: ViTMSNConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer, attention_probs
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMSN
| ViTMSNSelfAttention |
python | pennersr__django-allauth | allauth/account/internal/flows/phone_verification.py | {
"start": 806,
"end": 2910
} | class ____(AbstractCodeVerificationProcess):
def __init__(self, user, state):
super().__init__(
user=user,
state=state,
timeout=app_settings.PHONE_VERIFICATION_TIMEOUT,
max_attempts=app_settings.PHONE_VERIFICATION_MAX_ATTEMPTS,
)
@property
def phone(self) -> str:
return self.state["phone"]
def send(self, skip_enumeration_sms: bool = False) -> None:
ratelimit.consume(
context.request,
action="verify_phone",
key=self.phone,
raise_exception=True,
)
adapter = get_adapter()
if self.user:
code = adapter._generate_phone_verification_code_compat(
user=self.user,
phone=self.phone,
)
else:
code = ""
self.state["code"] = code
self.send_sms(skip_enumeration_sms)
get_adapter().add_message(
context.request,
messages.INFO,
"account/messages/phone_verification_sent.txt",
{"phone": self.phone},
)
self.persist()
def send_sms(self, skip_enumeration_sms: bool) -> None:
adapter = get_adapter()
if not self.user or self.state.get("account_already_exists"):
if not skip_enumeration_sms:
if self.state.get("account_already_exists") or self.state.get("signup"):
adapter.send_account_already_exists_sms(self.phone)
else:
adapter.send_unknown_account_sms(self.phone)
return
adapter.send_verification_code_sms(
user=self.user,
code=self.code,
phone=self.phone,
)
def finish(self) -> None:
phone = self.state["phone"]
adapter = get_adapter()
adapter.set_phone_verified(self.user, phone)
adapter.add_message(
context.request,
messages.SUCCESS,
"account/messages/phone_verified.txt",
{"phone": phone},
)
| PhoneVerificationProcess |
python | django__django | tests/prefetch_related/tests.py | {
"start": 70492,
"end": 76898
} | class ____(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
has been fetched[1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title="book1"),
Book.objects.create(title="book2"),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name="Author11"),
Author.objects.create(first_book=cls.book1, name="Author12"),
Author.objects.create(first_book=cls.book2, name="Author21"),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address="Happy place"),
AuthorAddress.objects.create(author=cls.author12, address="Haunted house"),
AuthorAddress.objects.create(author=cls.author21, address="Happy place"),
]
cls.bookwithyear1 = BookWithYear.objects.create(
title="Poems", published_year=2010
)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the
same lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(title__in=["book1", "book2"]).prefetch_related(
Prefetch(
"first_time_authors",
Author.objects.prefetch_related(
Prefetch(
"addresses",
AuthorAddress.objects.filter(address="Happy place"),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(
book1.first_time_authors.all(), [self.author11, self.author12]
)
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(
book1.first_time_authors.all()[0].addresses.all(),
[self.author1_address1],
)
self.assertSequenceEqual(
book1.first_time_authors.all()[1].addresses.all(), []
)
self.assertSequenceEqual(
book2.first_time_authors.all()[0].addresses.all(),
[self.author2_address1],
)
self.assertEqual(
list(book1.first_time_authors.all()),
list(book1.first_time_authors.all().all()),
)
self.assertEqual(
list(book2.first_time_authors.all()),
list(book2.first_time_authors.all().all()),
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all()),
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all()),
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all()),
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(title__in=["book1", "book2"]).prefetch_related(
Prefetch(
"first_time_authors",
Author.objects.prefetch_related(
Prefetch(
"addresses",
AuthorAddress.objects.filter(address="Happy place"),
to_attr="happy_place",
)
),
to_attr="first_authors",
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(
book1.first_authors[0].happy_place, [self.author1_address1]
)
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(
book2.first_authors[0].happy_place, [self.author2_address1]
)
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
(bookwithyear1,) = BookWithYear.objects.prefetch_related("bookreview_set")
with self.assertNumQueries(0):
self.assertCountEqual(
bookwithyear1.bookreview_set.all(), [self.bookreview1]
)
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], "bookreview_set")
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], "bookreview_set")
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(
bookwithyear.bookreview_set.all(), [self.bookreview1, new_review]
)
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], "bookreview_set")
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
| DirectPrefetchedObjectCacheReuseTests |
python | keras-team__keras | keras/src/distillation/distillation_loss.py | {
"start": 9661,
"end": 14752
} | class ____(DistillationLoss):
"""Distillation loss that transfers knowledge from final model outputs.
This distillation loss applies temperature scaling to the teacher's logits
before computing the loss between teacher and student predictions. It's the
most common approach for knowledge distillation.
Arguments:
temperature: Temperature for softmax scaling. Higher values produce
softer probability distributions that are easier for the student to
learn. Typical values range from 3-5. Defaults to 3.0.
loss: Loss function to use for distillation. Can be:
- String identifier (e.g., 'kl_divergence',
'categorical_crossentropy')
- Keras loss instance
- Nested structure of losses matching the model output structure
- `None` to skip distillation for that output (useful for
multi-output models where you only want to distill some outputs)
At least one loss must be non-`None`. Defaults to 'kl_divergence'.
Examlpe(s):
```python
# Basic logits distillation with KL divergence
distillation_loss = LogitsDistillation(temperature=3.0)
# With categorical crossentropy loss
distillation_loss = LogitsDistillation(
temperature=4.0,
loss="categorical_crossentropy"
)
# With custom loss instance
distillation_loss = LogitsDistillation(
temperature=4.0,
loss=keras.losses.CategoricalCrossentropy(from_logits=True)
)
# For multi-output models
distillation_loss = LogitsDistillation(
temperature=3.0,
loss=["kl_divergence", "categorical_crossentropy"]
)
# For multi-output models, only distill some outputs
distillation_loss = LogitsDistillation(
temperature=3.0,
loss=["kl_divergence", None] # Skip second output
)
```
"""
@tracking.no_automatic_dependency_tracking
def __init__(
self,
temperature=3.0,
loss="kl_divergence",
):
self.temperature = temperature
self.loss = tree.map_structure(_convert_loss_to_function, loss)
flat_losses = tree.flatten(self.loss)
if all(l is None for l in flat_losses):
raise ValueError("At least one loss must be non-`None`.")
if not isinstance(self.temperature, (int, float)):
raise ValueError(
f"temperature must be a number, got {type(self.temperature)}"
)
if self.temperature <= 0.0:
raise ValueError("temperature must be positive.")
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute distillation loss using the configured loss function.
Arguments:
teacher_outputs: Logits from teacher model. Can be a single tensor,
list/tuple of tensors, or dict of tensors.
student_outputs: Logits from student model. Can be a single tensor,
list/tuple of tensors, or dict of tensors.
**kwargs: Additional arguments (ignored).
Returns:
Distillation loss tensor.
"""
# Apply temperature scaling using tree.map_structure
teacher_scaled = tree.map_structure(
lambda x: keras.ops.divide(x, self.temperature), teacher_outputs
)
student_scaled = tree.map_structure(
lambda x: keras.ops.divide(x, self.temperature), student_outputs
)
# Apply loss function(s) to corresponding outputs
def apply_loss(loss_fn, teacher_logits, student_logits):
if loss_fn is None:
return 0.0
# Special handling for KL divergence (needs probabilities)
if isinstance(loss_fn, keras.losses.KLDivergence):
teacher_probs = keras.ops.softmax(teacher_logits, axis=-1)
student_probs = keras.ops.softmax(student_logits, axis=-1)
loss = keras.ops.mean(loss_fn(teacher_probs, student_probs))
# Scale by temperature^2 for KL (per literature)
return loss * (self.temperature**2)
else:
# For other losses, use logits directly
return keras.ops.mean(loss_fn(teacher_logits, student_logits))
# Apply losses using tree.map_structure
loss_values = tree.map_structure(
apply_loss, self.loss, teacher_scaled, student_scaled
)
# Sum all losses and return scalar
flat_losses = tree.flatten(loss_values)
return keras.ops.sum(keras.ops.stack(flat_losses))
def get_config(self):
"""Get configuration for serialization."""
return {
"temperature": self.temperature,
"loss": serialization_lib.serialize_keras_object(self.loss),
}
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
config["loss"] = keras.losses.deserialize(config["loss"])
return cls(**config)
| LogitsDistillation |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 14263,
"end": 14459
} | class ____(logging.Logger):
def __init__(self, context: "PipesContext") -> None:
super().__init__(name="dagster-pipes")
self.addHandler(_PipesLoggerHandler(context))
| _PipesLogger |
python | spack__spack | lib/spack/spack/llnl/util/lock.py | {
"start": 27982,
"end": 28262
} | class ____(LockTransaction):
"""LockTransaction context manager that does a read and releases it."""
def _enter(self):
return self._lock.acquire_read(self._timeout)
def _exit(self, release_fn):
return self._lock.release_read(release_fn)
| ReadTransaction |
python | sphinx-doc__sphinx | sphinx/util/docfields.py | {
"start": 1389,
"end": 6216
} | class ____:
"""A doc field that is never grouped. It can have an argument or not, the
argument can be linked using a specified *rolename*. Field should be used
for doc fields that usually don't occur more than once.
The body can be linked using a specified *bodyrolename* if the content is
just a single inline or text node.
Example::
:returns: description of the return value
:rtype: description of the return type
"""
is_grouped = False
is_typed = False
def __init__(
self,
name: str,
names: tuple[str, ...] = (),
label: str = '',
has_arg: bool = True,
rolename: str = '',
bodyrolename: str = '',
) -> None:
self.name = name
self.names = names
self.label = label
self.has_arg = has_arg
self.rolename = rolename
self.bodyrolename = bodyrolename
def make_xref(
self,
rolename: str,
domain: str,
target: str,
innernode: type[TextlikeNode] = addnodes.literal_emphasis,
contnode: Node | None = None,
env: BuildEnvironment | None = None,
inliner: Inliner | None = None,
location: Element | None = None,
) -> Node:
# note: for backwards compatibility env is last, but not optional
assert env is not None
assert (inliner is None) == (location is None), (inliner, location)
if not rolename:
return contnode or innernode(target, target) # type: ignore[call-arg]
# The domain is passed from DocFieldTransformer. So it surely exists.
# So we don't need to take care the env.get_domain() raises an exception.
role = env.get_domain(domain).role(rolename)
if role is None or inliner is None:
if role is None and inliner is not None:
msg = __(
'Problem in %s domain: field is supposed '
"to use role '%s', but that role is not in the domain."
)
logger.warning(__(msg), domain, rolename, location=location)
refnode = addnodes.pending_xref(
'',
refdomain=domain,
refexplicit=False,
reftype=rolename,
reftarget=target,
)
refnode += contnode or innernode(target, target) # type: ignore[call-arg]
env.get_domain(domain).process_field_xref(refnode)
return refnode
lineno = -1
if location is not None:
with contextlib.suppress(ValueError):
lineno = get_node_line(location)
ns, _messages = role(rolename, target, target, lineno, inliner, {}, [])
return nodes.inline(target, '', *ns)
def make_xrefs(
self,
rolename: str,
domain: str,
target: str,
innernode: type[TextlikeNode] = addnodes.literal_emphasis,
contnode: Node | None = None,
env: BuildEnvironment | None = None,
inliner: Inliner | None = None,
location: Element | None = None,
) -> list[Node]:
return [
self.make_xref(
rolename, domain, target, innernode, contnode, env, inliner, location
)
]
def make_entry(self, fieldarg: str, content: list[Node]) -> _FieldEntry:
return fieldarg, content
def make_field(
self,
types: _FieldTypes,
domain: str,
item: _FieldEntry,
env: BuildEnvironment | None = None,
inliner: Inliner | None = None,
location: Element | None = None,
) -> nodes.field:
fieldarg, content = item
fieldname = nodes.field_name('', self.label)
if fieldarg:
fieldname += nodes.Text(' ')
fieldname.extend(
self.make_xrefs(
self.rolename,
domain,
fieldarg,
nodes.Text,
env=env,
inliner=inliner,
location=location,
)
)
if len(content) == 1 and (
isinstance(content[0], nodes.Text)
or (
isinstance(content[0], nodes.inline)
and len(content[0]) == 1
and isinstance(content[0][0], nodes.Text)
)
):
content = self.make_xrefs(
self.bodyrolename,
domain,
content[0].astext(),
contnode=content[0],
env=env,
inliner=inliner,
location=location,
)
fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
return nodes.field('', fieldname, fieldbody)
| Field |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/operators/test_kueue.py | {
"start": 7134,
"end": 9108
} | class ____:
def test_template_fields(self):
expected_template_fields = {"queue_name"} | set(KubernetesJobOperator.template_fields)
assert set(KubernetesStartKueueJobOperator.template_fields) == expected_template_fields
def test_init(self):
operator = KubernetesStartKueueJobOperator(
task_id=TEST_TASK_ID, queue_name=TEST_QUEUE_NAME, suspend=True
)
assert operator.queue_name == TEST_QUEUE_NAME
assert operator.suspend is True
assert operator.labels == {"kueue.x-k8s.io/queue-name": TEST_QUEUE_NAME}
assert operator.annotations == {"kueue.x-k8s.io/queue-name": TEST_QUEUE_NAME}
def test_init_suspend_is_false(self):
expected_error_message = (
"The `suspend` parameter can't be False. If you want to use Kueue for running Job"
" in a Kubernetes cluster, set the `suspend` parameter to True."
)
with pytest.raises(AirflowException, match=expected_error_message):
KubernetesStartKueueJobOperator(task_id=TEST_TASK_ID, queue_name=TEST_QUEUE_NAME, suspend=False)
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesStartKueueJobOperator.log"))
def test_init_suspend_is_none(self, mock_log):
expected_info_message = (
"You have not set parameter `suspend` in class %s. "
"For running a Job in Kueue the `suspend` parameter has been set to True."
)
operator = KubernetesStartKueueJobOperator(
task_id=TEST_TASK_ID,
queue_name=TEST_QUEUE_NAME,
)
assert operator.queue_name == TEST_QUEUE_NAME
assert operator.suspend is True
assert operator.labels == {"kueue.x-k8s.io/queue-name": TEST_QUEUE_NAME}
assert operator.annotations == {"kueue.x-k8s.io/queue-name": TEST_QUEUE_NAME}
mock_log.info.assert_called_once_with(expected_info_message, "KubernetesStartKueueJobOperator")
| TestKubernetesStartKueueJobOperator |
python | simonw__datasette | datasette/utils/__init__.py | {
"start": 6801,
"end": 8065
} | class ____(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, sqlite3.Row):
return tuple(obj)
if isinstance(obj, sqlite3.Cursor):
return list(obj)
if isinstance(obj, bytes):
# Does it encode to utf8?
try:
return obj.decode("utf8")
except UnicodeDecodeError:
return {
"$base64": True,
"encoded": base64.b64encode(obj).decode("latin1"),
}
return json.JSONEncoder.default(self, obj)
@contextmanager
def sqlite_timelimit(conn, ms):
deadline = time.perf_counter() + (ms / 1000)
# n is the number of SQLite virtual machine instructions that will be
# executed between each check. It takes about 0.08ms to execute 1000.
# https://github.com/simonw/datasette/issues/1679
n = 1000
if ms <= 20:
# This mainly happens while executing our test suite
n = 1
def handler():
if time.perf_counter() >= deadline:
# Returning 1 terminates the query with an error
return 1
conn.set_progress_handler(handler, n)
try:
yield
finally:
conn.set_progress_handler(None, n)
| CustomJSONEncoder |
python | pandas-dev__pandas | pandas/tests/series/test_unary.py | {
"start": 72,
"end": 1620
} | class ____:
# __neg__, __pos__, __invert__
def test_neg(self):
ser = Series(range(5), dtype="float64", name="series")
tm.assert_series_equal(-ser, -1 * ser)
def test_invert(self):
ser = Series(range(5), dtype="float64", name="series")
tm.assert_series_equal(-(ser < 0), ~(ser < 0))
@pytest.mark.parametrize(
"source, neg_target, abs_target",
[
([1, 2, 3], [-1, -2, -3], [1, 2, 3]),
([1, 2, None], [-1, -2, None], [1, 2, None]),
],
)
def test_all_numeric_unary_operators(
self, any_numeric_ea_dtype, source, neg_target, abs_target
):
# GH38794
dtype = any_numeric_ea_dtype
ser = Series(source, dtype=dtype)
neg_result, pos_result, abs_result = -ser, +ser, abs(ser)
if dtype.startswith("U"):
neg_target = -Series(source, dtype=dtype)
else:
neg_target = Series(neg_target, dtype=dtype)
abs_target = Series(abs_target, dtype=dtype)
tm.assert_series_equal(neg_result, neg_target)
tm.assert_series_equal(pos_result, ser)
tm.assert_series_equal(abs_result, abs_target)
@pytest.mark.parametrize("op", ["__neg__", "__abs__"])
def test_unary_float_op_mask(self, float_ea_dtype, op):
dtype = float_ea_dtype
ser = Series([1.1, 2.2, 3.3], dtype=dtype)
result = getattr(ser, op)()
target = result.copy(deep=True)
ser[0] = None
tm.assert_series_equal(result, target)
| TestSeriesUnaryOps |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_indexing.py | {
"start": 2286,
"end": 3375
} | class ____:
def test_get_loc_key_unit_mismatch(self):
idx = to_timedelta(["0 days", "1 days", "2 days"])
key = idx[1].as_unit("ms")
loc = idx.get_loc(key)
assert loc == 1
def test_get_loc_key_unit_mismatch_not_castable(self):
tdi = to_timedelta(["0 days", "1 days", "2 days"]).astype("m8[s]")
assert tdi.dtype == "m8[s]"
key = tdi[0].as_unit("ns") + Timedelta(1)
with pytest.raises(KeyError, match=r"Timedelta\('0 days 00:00:00.000000001'\)"):
tdi.get_loc(key)
assert key not in tdi
def test_get_loc(self):
idx = to_timedelta(["0 days", "1 days", "2 days"])
# GH 16909
assert idx.get_loc(idx[1].to_timedelta64()) == 1
# GH 16896
assert idx.get_loc("0 days") == 0
def test_get_loc_nat(self):
tidx = TimedeltaIndex(["1 days 01:00:00", "NaT", "2 days 01:00:00"])
assert tidx.get_loc(NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float("nan")) == 1
assert tidx.get_loc(np.nan) == 1
| TestGetLoc |
python | Pylons__pyramid | tests/test_scripts/test_common.py | {
"start": 18,
"end": 436
} | class ____(unittest.TestCase):
def test_parse_vars_good(self):
from pyramid.scripts.common import parse_vars
vars = ['a=1', 'b=2']
result = parse_vars(vars)
self.assertEqual(result, {'a': '1', 'b': '2'})
def test_parse_vars_bad(self):
from pyramid.scripts.common import parse_vars
vars = ['a']
self.assertRaises(ValueError, parse_vars, vars)
| TestParseVars |
python | jupyterlab__jupyterlab | jupyterlab/tests/test_registry.py | {
"start": 319,
"end": 4323
} | class ____(AppHandlerTest):
def test_node_not_available(self):
# patch should be applied on `jupyterlab.commands` and not on `jupyterlab_server.process`
# See https://docs.python.org/3/library/unittest.mock.html#where-to-patch
with patch("jupyterlab.commands.which") as which:
which.side_effect = ValueError("Command not found")
logger = logging.getLogger("jupyterlab")
config = commands._yarn_config(logger)
which.assert_called_once_with("node")
self.assertDictEqual(config, {"yarn config": {}, "npm config": {}})
def test_yarn_config(self):
with patch("subprocess.check_output") as check_output:
yarn_registry = "https://private.yarn/manager"
check_output.return_value = b"\n".join(
[
b'{"type":"info","data":"yarn config"}',
b'{"type":"inspect","data":{"registry":"'
+ bytes(yarn_registry, "utf-8")
+ b'"}}',
b'{"type":"info","data":"npm config"}',
b'{"type":"inspect","data":{"registry":"'
+ bytes(yarn_registry, "utf-8")
+ b'"}}',
]
)
logger = logging.getLogger("jupyterlab")
config = commands._yarn_config(logger)
self.assertDictEqual(
config,
{
"yarn config": {"registry": yarn_registry},
"npm config": {"registry": yarn_registry},
},
)
def test_yarn_config_failure(self):
with patch("subprocess.check_output") as check_output:
check_output.side_effect = subprocess.CalledProcessError(
1, ["yarn", "config", "list"], b"", stderr=b"yarn config failed."
)
logger = logging.getLogger("jupyterlab")
config = commands._yarn_config(logger)
self.assertDictEqual(config, {"yarn config": {}, "npm config": {}})
def test_get_registry(self):
with patch("subprocess.check_output") as check_output:
yarn_registry = "https://private.yarn/manager"
check_output.return_value = b"\n".join(
[
b'{"type":"info","data":"yarn config"}',
b'{"type":"inspect","data":{"registry":"'
+ bytes(yarn_registry, "utf-8")
+ b'"}}',
b'{"type":"info","data":"npm config"}',
b'{"type":"inspect","data":{"registry":"'
+ bytes(yarn_registry, "utf-8")
+ b'"}}',
]
)
handler = commands.AppOptions()
self.assertEqual(handler.registry, yarn_registry)
def test_populate_staging(self):
with patch("subprocess.check_output") as check_output:
yarn_registry = "https://private.yarn/manager"
check_output.return_value = b"\n".join(
[
b'{"type":"info","data":"yarn config"}',
b'{"type":"inspect","data":{"registry":"'
+ bytes(yarn_registry, "utf-8")
+ b'"}}',
b'{"type":"info","data":"npm config"}',
b'{"type":"inspect","data":{"registry":"'
+ bytes(yarn_registry, "utf-8")
+ b'"}}',
]
)
staging = pjoin(self.app_dir, "staging")
handler = commands._AppHandler(commands.AppOptions())
handler._populate_staging()
lock_path = pjoin(staging, "yarn.lock")
with open(lock_path) as f:
lock = f.read()
# yarn >=2.x does not record the registry in the lockfile
self.assertNotIn(commands.YARN_DEFAULT_REGISTRY, lock)
self.assertNotIn(yarn_registry, lock)
| TestAppHandlerRegistry |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 116397,
"end": 116811
} | class ____(sgqlc.types.Enum):
"""The possible states of a subscription.
Enumeration Choices:
* `IGNORED`: The User is never notified.
* `SUBSCRIBED`: The User is notified of all conversations.
* `UNSUBSCRIBED`: The User is only notified when participating or
@mentioned.
"""
__schema__ = github_schema
__choices__ = ("IGNORED", "SUBSCRIBED", "UNSUBSCRIBED")
| SubscriptionState |
python | plotly__plotly.py | plotly/graph_objs/histogram/_stream.py | {
"start": 233,
"end": 3521
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram"
_path_str = "histogram.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_drypython_returns.py | {
"start": 2577,
"end": 2661
} | class ____(_FirstBase[A, B], _SecondBase[float, bool]):
pass
| OneGenericOneConrete2 |
python | google__jax | jax/_src/linear_util.py | {
"start": 3427,
"end": 4082
} | class ____:
"""Storage for a value, with checks for overwriting or reading empty store."""
__slots__ = ("_val",)
def __init__(self):
self._val = _EMPTY_STORE_VALUE
def store(self, val):
if self._val is not _EMPTY_STORE_VALUE:
raise StoreException("Store occupied")
self._val = val
def reset(self):
# This should only be called in exceptional circumstances (e.g. debugging).
self._val = _EMPTY_STORE_VALUE
@property
def val(self):
if not self:
raise StoreException("Store empty")
return self._val
def __nonzero__(self):
return self._val is not _EMPTY_STORE_VALUE
__bool__ = __nonzero__
| Store |
python | dask__distributed | distributed/process.py | {
"start": 1621,
"end": 1700
} | class ____:
is_alive = False
pid = None
exitcode = None
| _ProcessState |
python | FactoryBoy__factory_boy | tests/test_alchemy.py | {
"start": 2055,
"end": 2289
} | class ____(unittest.TestCase):
def setUp(self):
models.Base.metadata.create_all(models.engine)
def tearDown(self):
models.session.remove()
models.Base.metadata.drop_all(models.engine)
| TransactionTestCase |
python | huggingface__transformers | src/transformers/models/d_fine/modular_d_fine.py | {
"start": 33384,
"end": 33442
} | class ____(RTDetrDecoderOutput):
pass
| DFineDecoderOutput |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/async/guestbook.py | {
"start": 669,
"end": 790
} | class ____(ndb.Model):
content = ndb.StringProperty()
post_date = ndb.DateTimeProperty(auto_now_add=True)
| Guestbook |
python | pytorch__pytorch | torch/_inductor/codegen/halide.py | {
"start": 19654,
"end": 61617
} | class ____(SIMDKernel):
overrides = HalideOverrides # type: ignore[assignment]
kexpr: Callable[[sympy.Expr], str] = texpr
def __init__(
self,
tiling: dict[str, sympy.Expr],
**kwargs,
) -> None:
super().__init__(tiling, **kwargs)
# For halide, we just write directly to the body
self.compute = self.body
self.loads = self.body
self.stores = self.body
self.indexing_code_dom = IndentedBuffer()
self.needs_dom_indexing = self.inside_reduction
self.has_reduction = self.inside_reduction
self.buffer_dimensions: dict[str, list[DimensionInfo]] = {}
self.buffer_offsets: dict[str, sympy.Expr] = {}
# {h0: size1, h1: size2, ...}
self.halide_vars: dict[sympy.Symbol, sympy.Expr] = {}
# {x0: h0, x1: h1+10*h2, ...}
self.index_replacements: dict[sympy.Expr, sympy.Expr] = {}
# {h1: hr1, ...}
self.reduction_renames: dict[sympy.Symbol, sympy.Symbol] = {}
# {"i": {h0: hi0}, "o": ...}
self.dom_renames: dict[str, dict[sympy.Symbol, sympy.Symbol]] = {}
# {"in_ptr0": ["in_ptr0_view0"], ...}
self.buffer_aliases: dict[str, list[str]] = defaultdict(list)
self.has_indirect_indexing = False
def dtype_to_str(self, dtype: torch.dtype) -> str:
return halide_type(dtype)
# pyrefly: ignore [bad-override]
def create_cse_var(self, name, bounds=None, dtype=None, shape=None):
self.body.writeline(f"{name} = hl.Func({name!r})")
# pyrefly: ignore [bad-argument-type]
return HalideCSEVariable(name, bounds, dtype, shape)
def finalize_indexing(self, indices: Sequence[sympy.Expr]):
"""
Hook called right before codegen with every index that will be
used in the fused kernel.
This populates self.halide_vars/index_replacements/reduction_renames which is an alternate indexing
scheme that avoids using divide and modulus. Instead of xindex/yindex/rindex
we base indexing on a larger number of vars whose product combines to those.
This function populates self.halide_vars, self.index_replacements, and self.reduction_renames
"""
assert not (
self.index_replacements or self.halide_vars or self.reduction_renames
)
size_hint = functools.partial(V.graph.sizevars.size_hint, fallback=inf) # type: ignore[arg-type]
# pyrefly: ignore [bad-assignment]
indices = dict.fromkeys(map(super().prepare_indexing, indices))
all_used_symbols = OrderedSet[Any]()
sym_to_node = {
n.symbol(): n
for n in itertools.chain.from_iterable(
[tree.nodes.values() for tree in self.range_trees]
)
}
def simplify(expr):
return sympy.simplify(
V.graph.sizevars.remove_precomputed_replacements(expr)
)
def visit_modular_indexing(base, divisor, modulus):
if base in sym_to_node:
node = sym_to_node[base]
all_used_symbols.add(
node.root.lookup(
node.divisor * divisor,
V.graph.sizevars.evaluate_min(
modulus, FloorDiv(node.length, divisor)
),
).symbol()
)
def visit_floor_div(base, divisor):
if base in sym_to_node:
node = sym_to_node[base]
all_used_symbols.add(
node.root.lookup(
node.divisor * divisor,
FloorDiv(node.length, divisor),
).symbol()
)
# first figure out all_used_symbols to do dead symbol elimination
for index in indices:
if index.has(ModularIndexing):
index.replace(
ModularIndexing(
sympy.Wild("base"),
sympy.Wild("divisor"),
sympy.Wild("modulus"),
),
visit_modular_indexing,
)
if index.has(FloorDiv):
index.replace(
FloorDiv(
sympy.Wild("base"),
sympy.Wild("divisor"),
),
visit_floor_div,
)
all_used_symbols.update(super().prepare_indexing(index).free_symbols)
self.has_indirect_indexing = any(
symbol_is_type(sym, SymT.INDIRECT) for sym in all_used_symbols
)
had_fallback = False
for tree in reversed(self.range_trees):
nodes = [n for n in tree.nodes.values() if n.symbol() in all_used_symbols]
nodes.sort(key=lambda n: size_hint(n.divisor))
if not nodes:
nodes.append(tree.lookup(1, tree.numel))
handled_count = 0
divisor = sympy.S.One
added_sym_size = []
# decide on a minimal set of symbols and put them in self.halide_vars
while handled_count < len(nodes) and not eq(tree.numel, divisor):
sizes_to_add = [
simplify(n.length) for n in nodes if eq(n.divisor, divisor)
]
handled_count += len(sizes_to_add)
assert sizes_to_add, nodes
end = divisor * functools.reduce(
V.graph.sizevars.evaluate_max, sizes_to_add
)
sizes_to_add.extend(
[
simplify(n.divisor / divisor)
for n in nodes
if lt(divisor, n.divisor) and lt(n.divisor, end)
]
)
while sizes_to_add:
next_size = functools.reduce(sympy.gcd, sizes_to_add)
if eq(next_size, 1):
# sizes share no common factors, e.g [2, 21, 42, 441, 889056]
# TODO(jansel): we should just prevent fusion in cases that hit this
next_size = simplify(tree.numel / divisor)
assert not eq(next_size, 1)
sizes_to_add = []
handled_count = len(nodes)
had_fallback = True
sym = sympy_index_symbol(f"h{len(self.halide_vars)}")
# pyrefly: ignore [missing-argument]
if tree.is_reduction:
self.reduction_renames[sym] = sympy_index_symbol(
f"hr{len(self.halide_vars)}"
)
self.halide_vars[sym] = next_size
added_sym_size.append((sym, next_size))
divisor *= next_size
new_sizes = [n.length for n in nodes if eq(n.divisor, divisor)]
handled_count += len(new_sizes)
prior_len = len(sizes_to_add)
sizes_to_add = [
sympy.simplify(s / next_size)
for s in sizes_to_add
if not eq(s, next_size)
]
assert len(sizes_to_add) < prior_len or prior_len == 0
sizes_to_add.extend(new_sizes)
# create a mapping to the new set of symbols in self.index_replacements
for node in nodes:
try:
idx = 0
divisor = 1
while not eq(node.divisor, divisor):
sym, size = added_sym_size[idx]
idx += 1
divisor *= size
length = 1
expr = sympy.S.Zero
while not eq(node.length, length):
sym, size = added_sym_size[idx]
idx += 1
expr += length * sym
length *= size
self.index_replacements[node.symbol()] = expr
except IndexError:
assert had_fallback
full_index = sympy.S.Zero
stride = sympy.S.One
for sym, size in added_sym_size:
full_index += stride * sym
stride *= size
self.index_replacements[node.symbol()] = (
V.graph.sizevars.simplify_with_ranges(
ModularIndexing(full_index, node.divisor, node.length),
self.halide_vars, # type: ignore[arg-type]
)
)
# codegen the variable definitions
for sym in self.halide_vars:
self.indexing_code.writeline(f"{sym} = hl.Var({sym.name!r})")
if self.reduction_renames:
self.codegen_rdom(
"rdom",
{rv: self.halide_vars[v] for v, rv in self.reduction_renames.items()},
)
def setup_dom_indexing(self):
"""RDom based indexing uses explicit iteration ranges for Func updates"""
prefix = "i" if self.inside_reduction else "o"
if prefix in self.dom_renames:
return self.dom_renames[prefix]
renames = {}
for var in self.halide_vars:
if not self.inside_reduction and var in self.reduction_renames:
continue
m = re.match(r"^h(\d+)$", var.name)
assert m
renames[var] = sympy_index_symbol(f"h{prefix}{m.group(1)}")
self.codegen_rdom(
f"{prefix}dom", {rv: self.halide_vars[v] for v, rv in renames.items()}
)
self.dom_renames[prefix] = renames
return renames
def codegen_rdom(self, name, vars):
rsizes = [
f"hl.Range(0, {self.kexpr(self.rename_indexing(size))})"
for size in vars.values()
]
self.indexing_code.writeline(f"{name} = hl.RDom([{', '.join(rsizes)}])")
for i, rsym in enumerate(vars.keys()):
self.indexing_code.writeline(f"{rsym} = {name}[{i}]")
def prepare_indexing(
self,
index: sympy.Expr,
):
index = super().prepare_indexing(index)
index = sympy_subs(index, self.index_replacements)
return V.graph.sizevars.simplify_with_ranges(index, self.halide_vars) # type: ignore[arg-type]
def sym_size(self, sym):
"""The size of an index symbol"""
if symbol_is_type(sym, SymT.TMP):
return self.lookup_cse_var(sym.name).indirect_indexing_size
return self.halide_vars[sym]
def indexing_to_dimensions(self, var: str, index: sympy.Expr, is_store: bool):
"""Convert address-based indexing into dimensions using self.halide_vars"""
symbols = []
for sym in sorted(index.free_symbols, key=lambda x: x.name): # type: ignore[attr-defined]
if symbol_is_type(sym, (SymT.HALIDE, SymT.TMP)):
symbols.append(sym)
else:
assert symbol_is_type(
sym,
(
SymT.UNBACKED_INT,
SymT.SIZE,
SymT.PRECOMPUTED_SIZE,
),
), sym
# group the expression by variables used
offset = sympy.S.Zero
split_expr = dict.fromkeys(symbols, sympy.S.Zero)
split_failed: list[tuple[list[sympy.Symbol], sympy.Expr]] = []
index = sympy.expand(self.rename_indexing(index))
for part in index.args if isinstance(index, sympy.Add) else [index]:
part_vars = [v for v in part.free_symbols if v in split_expr]
if len(part_vars) == 0:
offset += part
elif len(part_vars) == 1:
split_expr[part_vars[0]] += part
else:
new_split_failed = []
for i in range(len(split_failed)):
assert split_failed[i] is not None
other_vars, other_part = split_failed[i]
if OrderedSet(other_vars) & OrderedSet(part_vars):
part_vars.extend([v for v in other_vars if v not in part_vars])
part += other_part
else:
new_split_failed.append((other_vars, other_part))
split_failed = [*new_split_failed, (part_vars, part)]
def expr_to_dimension(expr, syms):
expr = sympy.factor(expr)
if len(syms) == 1:
stride_wild = sympy.Wild("wild", exclude=symbols)
m = expr.match(stride_wild * syms[0])
if m:
return DimensionInfo(
syms[0], self.sym_size(syms[0]), m[stride_wild]
)
assert not is_store, expr
length = sympy.simplify(
sympy_subs(expr, {sym: self.sym_size(sym) - 1 for sym in syms}) + 1
)
stride = sympy.S.One
if isinstance(expr, sympy.Mul):
for term in expr.args:
if isinstance(term, sympy.Integer):
stride *= term
expr = sympy.simplify(expr / term)
length = sympy.simplify(sympy.ceiling(length / term))
return DimensionInfo(expr, length, stride)
# try to turn each group into a strided access
dims = []
for syms, expr in split_failed:
for v in syms:
expr += split_expr.pop(v)
dims.append(expr_to_dimension(expr, syms))
for sym, expr in split_expr.items():
dims.append(expr_to_dimension(expr, [sym]))
dims.sort(key=lambda d: V.graph.sizevars.size_hint(d.stride, fallback=inf)) # type: ignore[arg-type]
if not dims: # scalar load/store
if self.has_indirect_indexing:
# workaround https://github.com/halide/Halide/issues/8338
dims.append(DimensionInfo(sympy.S.Zero, 1, 1))
elif not V.graph.sizevars.statically_known_equals(dims[0].stride, 1):
# Halide assumes dimension 0 is stride == 1, so add a dummy dimension
dims.insert(
0, DimensionInfo(sympy.S.Zero, 1 if is_store else dims[0].stride, 1)
)
if dims and not is_store:
if var in self.buffer_offsets and V.graph.sizevars.statically_known_geq(
offset, self.buffer_offsets[var]
):
# reuse the existing offset to avoid needing an input alias
self.apply_offset_to_dimension(dims, offset - self.buffer_offsets[var])
offset = self.buffer_offsets[var]
elif V.graph.sizevars.statically_known_gt(
offset, 0
): # TODO(jansel): negative offsets
# roll the offset into the dimensions for cleaner indexing
self.apply_offset_to_dimension(dims, offset)
offset = 0
orig_var = var
for i in itertools.count():
if self.install_dims(var, dims, offset, is_store):
return var, dims
assert not is_store
var = f"{orig_var}_view{i}"
if var not in self.buffer_aliases[orig_var]:
self.buffer_aliases[orig_var].append(var)
def install_dims(self, var, dims, offset, is_store):
"""Try to set self.buffer_dimensions[var], return True on success"""
if var not in self.buffer_dimensions:
self.buffer_dimensions[var] = dims
self.buffer_offsets[var] = offset
return True
if self.buffer_offsets[var] != offset or len(
self.buffer_dimensions[var]
) != len(dims):
return False
if is_store:
return self.buffer_dimensions[var] == dims
for old, new in zip(self.buffer_dimensions[var], dims):
if old.stride != new.stride:
return False
if old.size != new.size or old.expr != new.expr:
old.size = V.graph.sizevars.evaluate_max(old.size, new.size)
old.expr = None
return True
def apply_offset_to_dimension(self, dims, offset):
if offset == 0:
return
for i in reversed(range(len(dims))):
if dims[i].stride == 1 or V.graph.sizevars.statically_known_geq(
offset, dims[i].stride
):
part = FloorDiv(offset, dims[i].stride)
offset -= part * dims[i].stride
dims[i].expr += part
assert offset == 0
def used_dims_from_index(self, index: sympy.Expr):
"""Detect which range trees are used to populate HalideCSEVariable.used_dims"""
used_dims = OrderedSet[sympy.Symbol]()
for sym in index.free_symbols:
assert isinstance(sym, sympy.Symbol)
if symbol_is_type(sym, SymT.TMP):
# indirect indexing
cse_var = self.lookup_cse_var(sym.name)
assert (
isinstance(cse_var, HalideCSEVariable)
and cse_var.used_dims is not None
)
used_dims.update(cse_var.used_dims)
elif symbol_is_type(sym, SymT.HALIDE):
used_dims.add(sym)
elif symbol_is_type(
sym, (SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX)
):
pass
else:
raise NotImplementedError(f"unhandled symbol {sym}")
return self.sort_used_dims(used_dims)
def sort_used_dims(self, used_dims):
assert all(isinstance(x, sympy.Expr) for x in used_dims)
ordered = [
sym
for sym in itertools.chain(
self.halide_vars, self.reduction_renames.values()
)
if sym in used_dims
]
assert len(ordered) == len(used_dims)
return ordered
def make_index_str(self, dims, replacements=None, zero_vars=False):
index_str = ", ".join(d.index_str(replacements, zero_vars) for d in dims)
if len(dims) == 0:
index_str = "()"
elif len(dims) == 1:
# workaround for https://github.com/halide/Halide/issues/8299
index_str = f"{index_str},"
return index_str
def load(self, name: str, index: sympy.Expr):
"""Codegen a load from an InputBuffer"""
var = self.args.input(name)
index = self.prepare_indexing(index)
var, dims = self.indexing_to_dimensions(var, index, False)
line = f"{var}[{self.make_index_str(dims)}]"
dtype = V.graph.get_dtype(name)
if dtype in (torch.float16, torch.bfloat16):
dtype = torch.float32
line = f"hl.cast(hl.Float(32), {line})"
if self._load_mask:
assert (
isinstance(self._load_mask, HalideCSEVariable)
and self._load_mask.used_dims is not None
)
used_dims = OrderedSet(
(*self.used_dims_from_index(index), *self._load_mask.used_dims)
)
result = self.newfunc(self.sort_used_dims(used_dims))
if result.used_dims:
self.body.writeline(f"{result.name}_mask = hl.RDom([hl.Range(0, 1)])")
self.body.writeline(f"{result.name}_mask.where({self._load_mask})")
other = self.kexpr(self._load_other or 0) # type: ignore[arg-type]
self.body.writeline(
f"{result} = hl.cast({halide_type(dtype)}, {other})"
)
self.body.writeline(
f"{result} = {line} + hl.cast({halide_type(dtype)}, {result.name}_mask)"
)
else:
# scalar case
self.body.writeline(
f"{result} = hl.select({self._load_mask}, {line}, hl.cast({halide_type(dtype)}, 0))"
)
return result
else:
return self.genfunc(line, self.used_dims_from_index(index))
def lookup_cse_var(self, name: str):
return self.cse.varname_map[re.sub(r"\[.*", "", name)]
def store(
self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None
) -> None:
"""Codegen a store to an OutputBuffer"""
assert isinstance(value, HalideCSEVariable)
var = self.args.output(name)
index = self.prepare_indexing(index)
var, dims = self.indexing_to_dimensions(var, index, True)
if self.is_indirect_indexing(index) or mode is not None:
replacements = self.setup_dom_indexing()
index_str = self.make_index_str(dims, replacements)
value_str = value.subs_str(replacements)
undef_dims = (", ".join(["hl.Var()"] * len(dims))) or "()"
self.body.writeline(
DeferredLine(name, f"{var}[{undef_dims}] = hl.undef({var}.type())")
)
else:
index_str = self.make_index_str(dims, zero_vars=True)
value_str = str(value)
dtype = V.graph.get_dtype(name)
if mode is None:
line = f"{var}[{index_str}] = hl.cast({halide_type(dtype)}, {value_str})"
elif mode == "atomic_add":
line = f"{var}[{index_str}] += hl.cast({halide_type(dtype)}, {value_str})"
else:
raise NotImplementedError(f"store mode={mode}")
self.body.writeline(DeferredLine(name, line))
def reduction(
self,
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: ReductionType,
value: Union[CSEVariable, tuple[CSEVariable, ...]],
) -> Union[CSEVariable, tuple[CSEVariable, ...]]:
"""Codegen a reduction operation"""
assert self.inside_reduction
assert not self._load_mask
cache_key = (src_dtype, reduction_type, value)
if cache_key in self.cse.reduction_cache:
return self.cse.reduction_cache[cache_key]
if isinstance(value, tuple):
assert reduction_type == "welford_combine"
self.cse.reduction_cache[cache_key] = result_tuple = (
self.welford_combine_impl(*value)
)
return result_tuple
assert isinstance(value, HalideCSEVariable) and value.used_dims is not None
reduction_vars = OrderedSet(self.reduction_renames)
result_var = self.newfunc(
[v for v in value.used_dims if v not in reduction_vars],
)
if reduction_vars - OrderedSet(value.used_dims):
value = self.genfunc(
f"{value}",
self.sort_used_dims(OrderedSet((*value.used_dims, *reduction_vars))),
shape=value.shape,
)
value_str = value.subs_str(self.reduction_renames)
default = ir.Reduction.default_accumulator(reduction_type, src_dtype)
acc_type = halide_acc_type(dtype)
if reduction_type in ("argmax", "argmin"):
index = f"{result_var.name}_{reduction_type}"
self.body.writeline(f"{index} = hl.{reduction_type}(rdom, {value_str})")
# turn the N-D argmax index into a 1-D one
parts = []
stride = 1
for i, sym in enumerate(self.reduction_renames):
# pyrefly: ignore [bad-argument-type]
parts.append(f"{index}[{i}]")
if stride != 1:
# pyrefly: ignore [unsupported-operation]
parts[-1] += f"*{stride}"
stride *= self.halide_vars[sym]
self.body.writeline(f"{result_var} = {' + '.join(parts)}")
elif reduction_type == "welford_reduce":
# TODO(jansel): implement welford_reduce without fallback
result_var = self.welford_reduce_fallback(dtype, value)
else:
combine_fn = get_reduction_combine_fn(reduction_type, acc_type)
with V.set_ops_handler(AddParenHandler(HalideOverrides())):
combine_str = combine_fn(result_var, value_str) # type: ignore[arg-type]
default_str = f"hl.cast({acc_type}, {halide_constant(default)})"
self.body.writeline(f"{result_var} = {default_str}")
self.body.writeline(f"{result_var} = {combine_str}")
self.cse.reduction_cache[cache_key] = result_var
return result_var
def welford_combine_impl(self, mean, m2, weight):
assert isinstance(mean, HalideCSEVariable) and mean.used_dims is not None
assert isinstance(m2, HalideCSEVariable) and m2.used_dims is not None
assert isinstance(weight, HalideCSEVariable) and weight.used_dims is not None
used_dims = OrderedSet(
(*mean.used_dims, *m2.used_dims, *weight.used_dims) or self.halide_vars
)
used_dims -= OrderedSet(self.reduction_renames)
result_var = self.newfunc(self.sort_used_dims(used_dims))
default = [f"hl.cast({x.name}.type(), 0)" for x in (mean, m2, weight)]
pfx = result_var.name
self.body.writeline(f"{result_var} = hl.Tuple([{', '.join(default)}])")
self.body.writeline(f"{pfx}_mean_1 = {result_var}[0]")
self.body.writeline(f"{pfx}_m2_1 = {result_var}[1]")
self.body.writeline(f"{pfx}_weight_1 = {result_var}[2]")
self.body.writeline(f"{pfx}_mean_2 = {mean.subs_str(self.reduction_renames)}")
self.body.writeline(f"{pfx}_m2_2 = {m2.subs_str(self.reduction_renames)}")
self.body.writeline(
f"{pfx}_weight_2 = {weight.subs_str(self.reduction_renames)}"
)
self.body.writeline(f"{pfx}_delta = {pfx}_mean_2 - {pfx}_mean_1")
self.body.writeline(f"{pfx}_new_weight = {pfx}_weight_1 + {pfx}_weight_2")
self.body.writeline(
f"{pfx}_w2_over_w = hl.select({pfx}_new_weight == 0.0, 0.0, {pfx}_weight_2 / {pfx}_new_weight)"
)
update = [
f"{pfx}_mean_1 + {pfx}_delta * {pfx}_w2_over_w",
f"{pfx}_m2_1 + {pfx}_m2_2 + {pfx}_delta * {pfx}_delta * {pfx}_weight_1 * {pfx}_w2_over_w",
f"{pfx}_new_weight",
]
self.body.writeline(f"{result_var} = hl.Tuple([{', '.join(update)}])")
unpacked = []
for i in range(3):
unpacked.append(self.newfunc(result_var.used_dims))
self.body.writeline(f"{unpacked[-1]} = {result_var}[{i}]")
return tuple(unpacked)
def scan(
self,
dtypes: tuple[torch.dtype, ...],
combine_fn: Callable[
[tuple[CSEVariable, ...], tuple[CSEVariable, ...]], tuple[CSEVariable, ...]
],
values_orig: tuple[CSEVariable, ...],
) -> tuple[CSEVariable, ...]:
assert self.inside_reduction
assert len(dtypes) == len(values_orig)
values: list[HalideCSEVariable] = []
all_used_dims = OrderedSet[sympy.Symbol]()
for value in values_orig:
assert isinstance(value, HalideCSEVariable) and value.used_dims is not None
if OrderedSet(value.used_dims) & OrderedSet(self.reduction_renames):
values.append(value)
else:
values.append(
self.genfunc(
f"{value}",
[*value.used_dims, [*self.reduction_renames][:1]],
shape=value.shape,
)
)
all_used_dims.update(value.used_dims)
result_var = self.newfunc(self.sort_used_dims(all_used_dims))
assert result_var.used_dims and OrderedSet(result_var.used_dims) & OrderedSet(
self.reduction_renames
)
initial = [
f"hl.cast({halide_acc_type(dtype)}, {value})"
for dtype, value in zip(dtypes, values)
]
length = self.kexpr(self.rename_indexing(self.range_trees[-1].numel))
scan_dom = f"{result_var.name}_rdom"
scan = f"{scan_dom}.x"
self.body.writeline(f"{scan_dom} = hl.RDom([hl.Range(1, {length})])")
assert len(self.reduction_renames) == 1, (
"multi-dimensional scan not implemented"
)
(scan_var,) = [*self.reduction_renames] # type: ignore[misc]
scan_renames_cur = {scan_var: sympy_index_symbol(scan)}
scan_renames_pri = {scan_var: sympy_index_symbol(scan) - 1}
if len(values) == 1:
def maybe_tuple(x):
return x[0]
read_left = [result_var.subs_str(scan_renames_pri)]
read_right = [result_var.subs_str(scan_renames_cur)]
else:
def maybe_tuple(x):
return f"hl.Tuple([{', '.join(x)}])"
read_left = [
result_var.subs_str(scan_renames_pri) + f"[{i}]"
for i in range(len(values))
]
read_right = [
result_var.subs_str(scan_renames_cur) + f"[{i}]"
for i in range(len(values))
]
self.body.writeline(f"{result_var} = {maybe_tuple(initial)}")
# Disable CSE for update fn
with V.set_ops_handler(AddParenHandler(HalideOverrides())):
combine_str = combine_fn(read_left, read_right) # type: ignore[arg-type]
self.body.writeline(
f"{result_var.subs_str(scan_renames_cur)} = {maybe_tuple(combine_str)}"
)
if len(values) == 1:
return (result_var,)
unpack_vars = [self.newfunc(self.sort_used_dims(all_used_dims)) for _ in values]
for i, v in enumerate(unpack_vars):
self.body.writeline(f"{v} = {result_var}[{i}]")
return tuple(unpack_vars)
def genfunc(
self,
line,
used_dims,
*,
bounds=ValueRanges.unknown(),
shape: BlockShapeType = None,
) -> HalideCSEVariable:
var = self.cse.generate(self.body, line, bounds=bounds, shape=shape)
assert isinstance(var, HalideCSEVariable)
var.used_dims = used_dims
return var
def newfunc(self, used_dims, *, shape: BlockShapeType = None) -> HalideCSEVariable:
var = self.cse.newvar(shape=shape)
assert isinstance(var, HalideCSEVariable)
var.used_dims = used_dims
return var
def halide_buffer_numel(self, name: str):
"""
We map all tensors to 1D buffers in Halide since Halide has trouble representing some strides that PyTorch
supports. If there are gaps in the underlying layout the numel we pass to Halide includes the gaps while
PyTorch's numel excludes them.
"""
return V.graph.get_buffer(name).get_layout().storage_size()
def halide_argdefs(self):
"""
Halide requires scalar inputs before outputs, so need to reorder args.
"""
def arg_order(arg_tuple):
_call_str, arg = arg_tuple
if isinstance(arg, SizeArg):
return 1 # this would normally be at the end, move it to middle
elif "out_ptr" in arg.name:
return 2
else:
assert "in_ptr" in arg.name
return 0
result: list[tuple[Optional[str], KernelArgType]] = []
_, a, b, _ = self.args.python_argdefs()
for call_str, arg in sorted(zip(a, b), key=arg_order):
result.append((call_str, arg))
if isinstance(arg, TensorArg):
assert arg.offset == 0 and arg.alias_of is None
result.extend(
(
None,
TensorArg(
alias,
arg.buffer,
arg.dtype,
arg.offset,
alias_of=arg.name,
),
)
for alias in self.buffer_aliases.get(arg.name, ())
)
return result
def halide_kernel_meta(self) -> HalideMeta:
"""Compute metadata required by codecache.py"""
argtypes = []
for _, arg in self.halide_argdefs():
if isinstance(arg, SizeArg):
shape = None
stride = None
offset = None
dtype = "long"
else:
shape = [
cexpr(self.rename_indexing(x.size))
for x in self.buffer_dimensions[arg.name]
]
stride = [
cexpr(self.rename_indexing(x.stride))
for x in self.buffer_dimensions[arg.name]
]
assert len(shape) == len(stride)
offset = cexpr(self.buffer_offsets[arg.name])
dtype = f"{DTYPE_TO_CPP[arg.dtype]}*"
argtypes.append(
HalideInputSpec(
dtype,
arg.name,
shape=shape,
stride=stride,
offset=offset,
alias_of=arg.alias_of,
)
)
current_device = V.graph.get_current_device_or_throw()
if current_device.type == "cpu":
target = [config.halide.cpu_target]
scheduler = config.halide.scheduler_cpu
scheduler_flags = {
"parallelism": parallel_num_threads(),
}
cuda_device = None
else:
assert current_device.type == "cuda", "only cpu/cuda supported"
assert current_device.index <= 0, "only default device supported"
target = [config.halide.gpu_target]
scheduler = config.halide.scheduler_cuda
capability = torch.cuda.get_device_properties(current_device)
if "cuda_capability" not in target[0]:
for major, minor in [(8, 6), (8, 0), (7, 5), (7, 0), (6, 1)]:
if capability.major >= major and capability.minor >= minor:
target.append(f"cuda_capability_{major}{minor}")
break
target.append("user_context")
scheduler_flags = {
"parallelism": capability.multi_processor_count,
# TODO(jansel): explore other flags, see:
# grep parser.parse ~/Halide/src/autoschedulers/anderson2021/AutoSchedule.cpp
}
cuda_device = max(0, current_device.index)
# strict_float is requires for correctness
target.append("strict_float")
# without this we will initialize cuda once per kernel and hit errors
target.append("no_runtime")
if not config.halide.asserts:
target.append("no_asserts")
if config.halide.debug:
target.append("debug")
if "64" in self.index_dtype:
# TODO(jansel): it is unclear if this does anything, since input sizes are still int32
target.append("large_buffers")
return HalideMeta(
argtypes,
target="-".join(target),
scheduler=scheduler,
scheduler_flags=scheduler_flags, # type: ignore[arg-type]
cuda_device=cuda_device,
)
def codegen_kernel(self, name=None):
"""Called at the end to generate a final kernel string"""
if self.args.inplace_buffers:
raise Unsupported("inplace_buffers")
meta = self.halide_kernel_meta() # ensure needed args are added early
code = IndentedBuffer()
code.splice(
"""
import halide as hl
from torch._inductor.runtime import halide_helpers
from math import inf, nan
@hl.generator(name="kernel")
class Kernel:
""",
strip=True,
)
code.do_indent()
for _, arg in self.halide_argdefs():
if isinstance(arg, SizeArg):
code.writeline(f"{arg.name} = hl.InputScalar({self.index_dtype})")
else:
assert arg.buffer, arg
argcls = "hl.OutputBuffer" if "out" in arg.name else "hl.InputBuffer"
argtype = halide_type(arg.dtype)
ndim = len(self.buffer_dimensions[arg.name])
code.writeline(f"{arg.name} = {argcls}({argtype}, {ndim})")
code.splice(
"""
def generate(g):
"""
)
code.do_indent()
for _, arg in self.halide_argdefs():
code.writeline(f"{arg.name} = g.{arg.name}")
for old, new in self.args.aliases():
code.writeline(f"{old} = {new}")
code.splice(self.indexing_code)
def update_index(m):
var = cast(HalideCSEVariable, self.cse.varname_map[m.group(1)])
assert var.used_dims is not None, var
return str(var)
for line in self.body._lines:
if isinstance(line, str):
# fill in missing indices
line = HalideCSEVariable.undefined_re.sub(update_index, line)
code.writeline(line)
code.writeline("")
code.writeline("assert g.using_autoscheduler()")
for _, arg in self.halide_argdefs():
# fallback=1 below because halide requires buffers to be at least as large as the estimates
# This causes crashes if our estimate is greater than the vector length
# https://github.com/halide/Halide/issues/3103
if isinstance(arg, SizeArg):
hint = V.graph.sizevars.size_hint(arg.expr, fallback=1)
code.writeline(f"{arg.name}.set_estimate({hint})")
else:
dims = self.buffer_dimensions[arg.name]
range_hints = []
for i, dim in enumerate(dims):
hint = self._autoscheduler_workarounds(
V.graph.sizevars.size_hint(dim.size, fallback=1), dims
)
# pyrefly: ignore [bad-argument-type]
range_hints.append(f"hl.Range(0, {hint})")
if "out" not in arg.name:
code.writeline(f"{arg.name}.dim({i}).set_min(0)")
try:
code.writeline(
f"{arg.name}.dim({i}).set_stride({int(dim.stride)})"
)
except TypeError:
pass # not integer
try:
code.writeline(
f"{arg.name}.dim({i}).set_extent({int(dim.size)})"
)
except TypeError:
pass # not integer
code.writeline(f"{arg.name}.set_estimates([{', '.join(range_hints)}])")
code.do_unindent(2)
code.splice(
"""
if __name__ == "__main__":
hl.main()
""".rstrip(),
)
if meta.scheduler:
code.splice(
f"""
else:
hl.load_plugin({HalideCodeCache.find_libautoschedule(meta.scheduler)!r})
target = hl.Target({meta.target!r})
autoscheduler = hl.AutoschedulerParams({meta.scheduler!r}, {meta.scheduler_flags!r})
with hl.GeneratorContext(target, autoscheduler):
gen = Kernel()
pipeline = gen._build_pipeline()
# gen.compile_to_callable() does not run the autoscheduler
pipeline.apply_autoscheduler(target, autoscheduler)
kernel = pipeline.compile_to_callable([
gen._get_input_parameter(a.name)._to_argument()
for a in gen._get_arginfos()
if a.dir == hl.ArgInfoDirection.Input
], target)
""",
strip=True,
)
else:
code.splice(
f"""
else:
with hl.GeneratorContext(hl.Target({meta.target!r})):
kernel = Kernel().compile_to_callable()
""",
strip=True,
)
return code.getvalue()
@staticmethod
def _autoscheduler_workarounds(n, dims):
if (
len(dims) == 1
and config.halide.scheduler_cuda == "Anderson2021"
and V.graph.get_current_device_or_throw().type == "cuda"
):
# workaround https://github.com/halide/Halide/issues/8246
n = max(2, n)
return n
def call_kernel(self, name: str, node=None, deallocate_ws: bool = True):
"""Codegen a call to this kernel"""
wrapper = V.graph.wrapper_code
call_args = [f"{n}" for n, arg in self.halide_argdefs() if arg.alias_of is None]
current_device = V.graph.get_current_device_or_throw()
if current_device.type == "cuda":
stream_name = wrapper.write_get_raw_stream(
current_device.index, V.graph.name
)
call_args.append(stream_name)
wrapper.generate_kernel_call(
name,
call_args,
device=current_device,
triton=False,
)
def generate_assert(self, check):
return False # TODO(jansel): support asserts
def check_bounds(
self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool
):
pass # TODO(jansel): support asserts
| HalideKernel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_product_search_query_performance_report.py | {
"start": 15986,
"end": 23456
} | class ____(TestBaseProductSearchQueryPerformanceReport):
stream_name = "product_search_query_performance_report_weekly"
report_file = "product_search_query_performance_report_weekly"
records_number = 5
incremental_report_file = "product_search_query_performance_report_weekly_incremental"
incremental_report_file_with_records_further_cursor = (
"product_search_query_performance_report_weekly_incremental_with_records_further_cursor"
)
report_file_with_records_further_start_date = "product_search_query_performance_report_weekly_with_records_further_start_date"
state_file = "product_search_query_performance_report_state"
state_file_legacy = "product_search_query_performance_report_state"
def mock_report_apis(self):
super().mock_report_apis()
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Weekly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Weekly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Weekly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Weekly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
| TestProductSearchQueryPerformanceReportWeeklyStream |
python | celery__celery | t/unit/utils/test_term.py | {
"start": 266,
"end": 2842
} | class ____:
@pytest.fixture(autouse=True)
def preserve_encoding(self, patching):
patching('sys.getdefaultencoding', 'utf-8')
@pytest.mark.parametrize('name,color', [
('black', term.BLACK),
('red', term.RED),
('green', term.GREEN),
('yellow', term.YELLOW),
('blue', term.BLUE),
('magenta', term.MAGENTA),
('cyan', term.CYAN),
('white', term.WHITE),
])
def test_colors(self, name, color):
assert fg(30 + color) in str(colored().names[name]('foo'))
@pytest.mark.parametrize('name', [
'bold', 'underline', 'blink', 'reverse', 'bright',
'ired', 'igreen', 'iyellow', 'iblue', 'imagenta',
'icyan', 'iwhite', 'reset',
])
def test_modifiers(self, name):
assert str(getattr(colored(), name)('f'))
def test_unicode(self):
assert str(colored().green('∂bar'))
assert colored().red('éefoo') + colored().green('∂bar')
assert colored().red('foo').no_color() == 'foo'
def test_repr(self):
assert repr(colored().blue('åfoo'))
assert "''" in repr(colored())
def test_more_unicode(self):
c = colored()
s = c.red('foo', c.blue('bar'), c.green('baz'))
assert s.no_color()
c._fold_no_color(s, 'øfoo')
c._fold_no_color('fooå', s)
c = colored().red('åfoo')
assert c._add(c, 'baræ') == '\x1b[1;31m\xe5foo\x1b[0mbar\xe6'
c2 = colored().blue('ƒƒz')
c3 = c._add(c, c2)
assert c3 == '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m'
def test_read_as_base64(self):
test_data = b"The quick brown fox jumps over the lazy dog"
with NamedTemporaryFile(mode='wb') as temp_file:
temp_file.write(test_data)
temp_file.seek(0)
temp_file_path = temp_file.name
result = _read_as_base64(temp_file_path)
expected_result = b64encode(test_data).decode('ascii')
assert result == expected_result
@pytest.mark.parametrize('is_tty, iterm_profile, expected', [
(True, 'test_profile', True),
(False, 'test_profile', False),
(True, None, False),
])
@patch('sys.stdin.isatty')
@patch.dict(os.environ, {'ITERM_PROFILE': 'test_profile'}, clear=True)
def test_supports_images(self, mock_isatty, is_tty, iterm_profile, expected):
mock_isatty.return_value = is_tty
if iterm_profile is None:
del os.environ['ITERM_PROFILE']
assert supports_images() == expected
| test_colored |
python | gevent__gevent | src/greentest/3.12/test_weakref.py | {
"start": 34839,
"end": 39033
} | class ____(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
| WeakMethodTestCase |
python | ray-project__ray | python/ray/tests/test_actor_group.py | {
"start": 151,
"end": 2932
} | class ____:
def return_arg(self, arg):
return arg
def get_actor_metadata(self):
return "metadata"
# Use default filterwarnings behavior for this test
@pytest.mark.filterwarnings("default")
def test_actor_creation(ray_start_2_cpus):
assert ray.available_resources()["CPU"] == 2
with warnings.catch_warnings(record=True) as w:
ag = ActorGroup(actor_cls=DummyActor, num_actors=2)
assert any(
"use ray.util.multiprocessing" in str(warning.message) for warning in w
)
assert len(ag) == 2
time.sleep(1)
# Make sure both CPUs are being used by the actors.
assert "CPU" not in ray.available_resources()
ag.shutdown()
def test_actor_creation_num_cpus(ray_start_2_cpus):
assert ray.available_resources()["CPU"] == 2
ag = ActorGroup(actor_cls=DummyActor, num_cpus_per_actor=2)
time.sleep(1)
assert len(ag) == 1
# Make sure both CPUs are being used by the actor.
assert "CPU" not in ray.available_resources()
ag.shutdown()
@pytest.mark.parametrize(
"ray_start_2_cpus",
[{"include_dashboard": True}],
indirect=True,
)
def test_actor_shutdown(ray_start_2_cpus):
assert ray.available_resources()["CPU"] == 2
ag = ActorGroup(actor_cls=DummyActor, num_actors=2)
time.sleep(1)
assert "CPU" not in ray.available_resources()
assert len(list_actors()) == 2
ag.shutdown()
time.sleep(1)
assert ray.available_resources()["CPU"] == 2
with pytest.raises(RuntimeError):
ag.return_arg.remote(1)
def test_actor_restart(ray_start_2_cpus):
ag = ActorGroup(actor_cls=DummyActor, num_actors=2)
with pytest.raises(RuntimeError):
ag.start()
# Avoid race condition.
time.sleep(1)
ag.shutdown(0)
ag.start()
ray.get(ag.return_arg.remote(1))
def test_actor_method(ray_start_2_cpus):
ag = ActorGroup(actor_cls=DummyActor, num_actors=2)
assert ray.get(ag.return_arg.remote(1)) == [1, 1]
def test_actor_metadata(ray_start_2_cpus):
ag = ActorGroup(actor_cls=DummyActor, num_actors=2)
assert ag.actor_metadata == ["metadata", "metadata"]
def test_actor_method_fail(ray_start_2_cpus):
ag = ActorGroup(actor_cls=DummyActor, num_actors=2)
with pytest.raises(TypeError):
ag.return_arg(1)
with pytest.raises(AttributeError):
ag.non_existent_method.remote()
def test_bad_resources(ray_start_2_cpus):
with pytest.raises(ValueError):
ActorGroup(actor_cls=DummyActor, num_actors=-1)
with pytest.raises(ValueError):
ActorGroup(actor_cls=DummyActor, num_actors=-1)
with pytest.raises(ValueError):
ActorGroup(actor_cls=DummyActor, num_actors=-1)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| DummyActor |
python | joerick__pyinstrument | pyinstrument/renderers/speedscope.py | {
"start": 1229,
"end": 1495
} | class ____:
"""
Data class to store speedscope's concept of a "profile".
"""
name: str
events: list[SpeedscopeEvent]
end_value: float
start_value: float = 0.0
type: str = "evented"
unit: str = "seconds"
@dataclass
| SpeedscopeProfile |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 23642,
"end": 25619
} | class ____(Expr):
""" A Meijer G-function. """
def __new__(cls, an, ap, bm, bq):
obj = super().__new__(cls)
obj.an = Tuple(*list(map(expand, an)))
obj.ap = Tuple(*list(map(expand, ap)))
obj.bm = Tuple(*list(map(expand, bm)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.an, self.ap, self.bm, self.bq)
def _hashable_content(self):
return super()._hashable_content() + self.args
def __call__(self, z):
return meijerg(self.an, self.ap, self.bm, self.bq, z)
def compute_buckets(self):
"""
Compute buckets for the fours sets of parameters.
Explanation
===========
We guarantee that any two equal Mod objects returned are actually the
same, and that the buckets are sorted by real part (an and bq
descendending, bm and ap ascending).
Examples
========
>>> from sympy.simplify.hyperexpand import G_Function
>>> from sympy.abc import y
>>> from sympy import S
>>> a, b = [1, 3, 2, S(3)/2], [1 + y, y, 2, y + 3]
>>> G_Function(a, b, [2], [y]).compute_buckets()
({0: [3, 2, 1], 1/2: [3/2]},
{0: [2], y: [y, y + 1, y + 3]}, {0: [2]}, {y: [y]})
"""
dicts = pan, pap, pbm, pbq = [defaultdict(list) for i in range(4)]
for dic, lis in zip(dicts, (self.an, self.ap, self.bm, self.bq)):
for x in lis:
dic[_mod1(x)].append(x)
for dic, flip in zip(dicts, (True, False, False, True)):
for m, items in dic.items():
x0 = items[0]
items.sort(key=lambda x: x - x0, reverse=flip)
dic[m] = items
return tuple([dict(w) for w in dicts])
@property
def signature(self):
return (len(self.an), len(self.ap), len(self.bm), len(self.bq))
# Dummy variable.
_x = Dummy('x')
| G_Function |
python | jazzband__django-pipeline | tests/utils.py | {
"start": 204,
"end": 599
} | class ____(override_settings):
def __init__(self, **kwargs):
if django.VERSION[:2] >= (1, 10):
# Django 1.10's override_settings inherits from TestContextDecorator
# and its __init__ method calls its superclass' __init__ method too,
# so we must do the same.
super().__init__()
self.options = {"PIPELINE": kwargs}
| pipeline_settings |
python | openai__openai-python | src/openai/cli/_api/audio.py | {
"start": 1976,
"end": 3757
} | class ____:
@staticmethod
def transcribe(args: CLITranscribeArgs) -> None:
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
model = cast(
"Transcription | str",
get_client().audio.transcriptions.create(
file=(args.file, buffer_reader),
model=args.model,
language=args.language or omit,
temperature=args.temperature or omit,
prompt=args.prompt or omit,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
response_format=cast(Any, args.response_format),
),
)
if isinstance(model, str):
sys.stdout.write(model + "\n")
else:
print_model(model)
@staticmethod
def translate(args: CLITranslationArgs) -> None:
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
model = cast(
"Transcription | str",
get_client().audio.translations.create(
file=(args.file, buffer_reader),
model=args.model,
temperature=args.temperature or omit,
prompt=args.prompt or omit,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
response_format=cast(Any, args.response_format),
),
)
if isinstance(model, str):
sys.stdout.write(model + "\n")
else:
print_model(model)
| CLIAudio |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 22105,
"end": 22898
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso
| YosoPredictionHeadTransform |
python | pandas-dev__pandas | pandas/tests/indexes/test_indexing.py | {
"start": 5353,
"end": 6980
} | class ____:
def test_get_loc_non_hashable(self, index):
with pytest.raises(InvalidIndexError, match="[0, 1]"):
index.get_loc([0, 1])
def test_get_loc_non_scalar_hashable(self, index):
# GH52877
from enum import Enum
class E(Enum):
X1 = "x1"
assert not is_scalar(E.X1)
exc = KeyError
msg = "<E.X1: 'x1'>"
if isinstance(
index,
(
DatetimeIndex,
TimedeltaIndex,
PeriodIndex,
IntervalIndex,
),
):
# TODO: make these more consistent?
exc = InvalidIndexError
msg = "E.X1"
with pytest.raises(exc, match=msg):
index.get_loc(E.X1)
def test_get_loc_generator(self, index):
exc = KeyError
if isinstance(
index,
(
DatetimeIndex,
TimedeltaIndex,
PeriodIndex,
IntervalIndex,
MultiIndex,
),
):
# TODO: make these more consistent?
exc = InvalidIndexError
with pytest.raises(exc, match="generator object"):
# MultiIndex specifically checks for generator; others for scalar
index.get_loc(x for x in range(5))
def test_get_loc_masked_duplicated_na(self):
# GH#48411
idx = Index([1, 2, NA, NA], dtype="Int64")
result = idx.get_loc(NA)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
| TestGetLoc |
python | huggingface__transformers | src/transformers/generation/continuous_batching/cache_manager.py | {
"start": 11812,
"end": 15444
} | class ____(CacheAllocator):
"""Cache manager for a group of full attention layers."""
def __init__(self, index: int, block_size: int) -> None:
"""Initializes the cache manager for a group of full attention layers.
Args:
- index: the index of the associated layer group
- block_size: the size of the blocks in the cache
"""
self._index = index
self.block_size = block_size
self.block_table = {}
def allocate_blocks(self, n_blocks: int, request_id: str, block_manager: BlockManager) -> int | None:
"""Allocate (n_blocks) for a given (request_id) using the (block_manager). Returns the number of blocks
allocated if successful and None otherwise. For group of full attention layers, we always allocate the number of
requested blocks."""
# Make sure the request_id is in the block table and get the first block id
if request_id not in self.block_table:
self.block_table[request_id] = [] # TODO: check the impact of making this a deque
last_block_id = None
else:
last_block_id = self.block_table[request_id][-1]
# Actual allocation, return early if failed
allocated_blocks = block_manager.get_free_blocks(n_blocks, last_block_id)
if allocated_blocks is None:
return None
self.block_table[request_id].extend(allocated_blocks)
return n_blocks
def get_read_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
"""Returns the physical indices of where to read request_id's cache. For a group of full attention layers, we
first write the new cache to the cache tensor and then read the entire cache from the beginning to the end."""
# Retrieve the block table for the request and raise an error if it doesn't exist
block_table = self.block_table.get(request_id)
if block_table is None:
raise ValueError(f"No block table found for request {request_id}")
# Compute the physical indices
physical_indices = []
for i in range(past_length + query_length):
block_idx = i // self.block_size
block_offset = i % self.block_size
physical_index = block_table[block_idx] * self.block_size + block_offset
physical_indices.append(physical_index)
return physical_indices
def get_write_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
"""Returns the physical indices for writing to the cache. For a group of full attention layers, we write the new
cache as a continuation of the existing cache for the same request."""
block_table = self.block_table.get(request_id)
if block_table is None:
raise ValueError(f"No block table found for request {request_id}")
# Compute the physical indices
physical_indices = []
for i in range(past_length, past_length + query_length):
block_idx = i // self.block_size
block_offset = i % self.block_size
physical_index = block_table[block_idx] * self.block_size + block_offset
physical_indices.append(physical_index)
return physical_indices
def get_seqlens_k(self, request_id: str, past_length: int, query_length: int) -> tuple[str, int]:
"""Returns the attention type of the cache allocator and the key sequence length for the given request_id."""
seqlens_k = past_length + query_length
return "full_attention", seqlens_k
| FullAttentionCacheAllocator |
python | ray-project__ray | python/ray/serve/tests/test_util.py | {
"start": 2286,
"end": 2543
} | class ____:
def __call__(self, *args):
return "reached decorated_actor"
def gen_func():
@serve.deployment
def f():
pass
return f
def gen_class():
@serve.deployment
class A:
pass
return A
| DecoratedActor |
python | openai__openai-python | src/openai/types/evals/create_eval_jsonl_run_data_source_param.py | {
"start": 1025,
"end": 1285
} | class ____(TypedDict, total=False):
source: Required[Source]
"""Determines what populates the `item` namespace in the data source."""
type: Required[Literal["jsonl"]]
"""The type of data source. Always `jsonl`."""
| CreateEvalJSONLRunDataSourceParam |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 119163,
"end": 120231
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("model_service.Model.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("model_service.ModelServiceHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = DeleteModelVersionOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_id=TEST_MODEL_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.delete_model_version.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_id=TEST_MODEL_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestVertexAIDeleteModelVersionOperator |
python | sqlalchemy__sqlalchemy | examples/versioned_history/test_versioning.py | {
"start": 29825,
"end": 29901
} | class ____(TestVersioning, unittest.TestCase):
pass
| TestVersioningUnittest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 276027,
"end": 277346
} | class ____(
ConditionalValueDefstringnullExprRef
):
"""
ConditionalPredicateValueDefstringnullExprRef schema wrapper.
Parameters
----------
test : str, dict, :class:`Predicate`, :class:`FieldGTPredicate`, :class:`FieldLTPredicate`, :class:`FieldGTEPredicate`, :class:`FieldLTEPredicate`, :class:`LogicalOrPredicate`, :class:`ParameterPredicate`, :class:`FieldEqualPredicate`, :class:`FieldOneOfPredicate`, :class:`FieldRangePredicate`, :class:`FieldValidPredicate`, :class:`LogicalAndPredicate`, :class:`LogicalNotPredicate`, :class:`PredicateComposition`
Predicate for triggering the condition
value : str, dict, :class:`ExprRef`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {
"$ref": "#/definitions/ConditionalPredicate<ValueDef<(string|null|ExprRef)>>"
}
def __init__(
self,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
**kwds,
):
super().__init__(test=test, value=value, **kwds)
| ConditionalPredicateValueDefstringnullExprRef |
python | tiangolo__fastapi | docs_src/security/tutorial003_an.py | {
"start": 968,
"end": 2571
} | class ____(User):
hashed_password: str
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def fake_decode_token(token):
# This doesn't provide any security at all
# Check the next version
user = get_user(fake_users_db, token)
return user
async def get_current_user(token: Annotated[str, Depends(oauth2_scheme)]):
user = fake_decode_token(token)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
return user
async def get_current_active_user(
current_user: Annotated[User, Depends(get_current_user)],
):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login(form_data: Annotated[OAuth2PasswordRequestForm, Depends()]):
user_dict = fake_users_db.get(form_data.username)
if not user_dict:
raise HTTPException(status_code=400, detail="Incorrect username or password")
user = UserInDB(**user_dict)
hashed_password = fake_hash_password(form_data.password)
if not hashed_password == user.hashed_password:
raise HTTPException(status_code=400, detail="Incorrect username or password")
return {"access_token": user.username, "token_type": "bearer"}
@app.get("/users/me")
async def read_users_me(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return current_user
| UserInDB |
python | mitmproxy__pdoc | test/testdata/demopackage/child_c.py | {
"start": 46,
"end": 175
} | class ____(child_b.B):
"""This class is defined in .child_c and inherits from .child_b.B."""
def c(self):
return 2
| C |
python | python-pillow__Pillow | src/PIL/Jpeg2KImagePlugin.py | {
"start": 7899,
"end": 13932
} | class ____(ImageFile.ImageFile):
format = "JPEG2000"
format_description = "JPEG 2000 (ISO 15444)"
def _open(self) -> None:
sig = self.fp.read(4)
if sig == b"\xff\x4f\xff\x51":
self.codec = "j2k"
self._size, self._mode = _parse_codestream(self.fp)
self._parse_comment()
else:
sig = sig + self.fp.read(8)
if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a":
self.codec = "jp2"
header = _parse_jp2_header(self.fp)
self._size, self._mode, self.custom_mimetype, dpi, self.palette = header
if dpi is not None:
self.info["dpi"] = dpi
if self.fp.read(12).endswith(b"jp2c\xff\x4f\xff\x51"):
hdr = self.fp.read(2)
length = _binary.i16be(hdr)
self.fp.seek(length - 2, os.SEEK_CUR)
self._parse_comment()
else:
msg = "not a JPEG 2000 file"
raise SyntaxError(msg)
self._reduce = 0
self.layers = 0
fd = -1
length = -1
try:
fd = self.fp.fileno()
length = os.fstat(fd).st_size
except Exception:
fd = -1
try:
pos = self.fp.tell()
self.fp.seek(0, io.SEEK_END)
length = self.fp.tell()
self.fp.seek(pos)
except Exception:
length = -1
self.tile = [
ImageFile._Tile(
"jpeg2k",
(0, 0) + self.size,
0,
(self.codec, self._reduce, self.layers, fd, length),
)
]
def _parse_comment(self) -> None:
while True:
marker = self.fp.read(2)
if not marker:
break
typ = marker[1]
if typ in (0x90, 0xD9):
# Start of tile or end of codestream
break
hdr = self.fp.read(2)
length = _binary.i16be(hdr)
if typ == 0x64:
# Comment
self.info["comment"] = self.fp.read(length - 2)[2:]
break
else:
self.fp.seek(length - 2, os.SEEK_CUR)
@property # type: ignore[override]
def reduce(
self,
) -> (
Callable[[int | tuple[int, int], tuple[int, int, int, int] | None], Image.Image]
| int
):
# https://github.com/python-pillow/Pillow/issues/4343 found that the
# new Image 'reduce' method was shadowed by this plugin's 'reduce'
# property. This attempts to allow for both scenarios
return self._reduce or super().reduce
@reduce.setter
def reduce(self, value: int) -> None:
self._reduce = value
def load(self) -> Image.core.PixelAccess | None:
if self.tile and self._reduce:
power = 1 << self._reduce
adjust = power >> 1
self._size = (
int((self.size[0] + adjust) / power),
int((self.size[1] + adjust) / power),
)
# Update the reduce and layers settings
t = self.tile[0]
assert isinstance(t[3], tuple)
t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4])
self.tile = [ImageFile._Tile(t[0], (0, 0) + self.size, t[2], t3)]
return ImageFile.ImageFile.load(self)
def _accept(prefix: bytes) -> bool:
return prefix.startswith(
(b"\xff\x4f\xff\x51", b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a")
)
# ------------------------------------------------------------
# Save support
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
# Get the keyword arguments
info = im.encoderinfo
if isinstance(filename, str):
filename = filename.encode()
if filename.endswith(b".j2k") or info.get("no_jp2", False):
kind = "j2k"
else:
kind = "jp2"
offset = info.get("offset", None)
tile_offset = info.get("tile_offset", None)
tile_size = info.get("tile_size", None)
quality_mode = info.get("quality_mode", "rates")
quality_layers = info.get("quality_layers", None)
if quality_layers is not None and not (
isinstance(quality_layers, (list, tuple))
and all(
isinstance(quality_layer, (int, float)) for quality_layer in quality_layers
)
):
msg = "quality_layers must be a sequence of numbers"
raise ValueError(msg)
num_resolutions = info.get("num_resolutions", 0)
cblk_size = info.get("codeblock_size", None)
precinct_size = info.get("precinct_size", None)
irreversible = info.get("irreversible", False)
progression = info.get("progression", "LRCP")
cinema_mode = info.get("cinema_mode", "no")
mct = info.get("mct", 0)
signed = info.get("signed", False)
comment = info.get("comment")
if isinstance(comment, str):
comment = comment.encode()
plt = info.get("plt", False)
fd = -1
if hasattr(fp, "fileno"):
try:
fd = fp.fileno()
except Exception:
fd = -1
im.encoderconfig = (
offset,
tile_offset,
tile_size,
quality_mode,
quality_layers,
num_resolutions,
cblk_size,
precinct_size,
irreversible,
progression,
cinema_mode,
mct,
signed,
fd,
comment,
plt,
)
ImageFile._save(im, fp, [ImageFile._Tile("jpeg2k", (0, 0) + im.size, 0, kind)])
# ------------------------------------------------------------
# Registry stuff
Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept)
Image.register_save(Jpeg2KImageFile.format, _save)
Image.register_extensions(
Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]
)
Image.register_mime(Jpeg2KImageFile.format, "image/jp2")
| Jpeg2KImageFile |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/batch_apis.py | {
"start": 1837,
"end": 2050
} | class ____(TypedDict, total=False):
"""Represent the parameters of ``is_authorized_variable`` API in the auth manager."""
method: ResourceMethod
details: VariableDetails | None
| IsAuthorizedVariableRequest |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 3596,
"end": 5652
} | class ____(test_lib.TestCase, parameterized.TestCase):
def _softmax(self, x):
assert len(x.shape) == 2
if x.shape[1] == 0:
return x
m = x.max(1)[:, np.newaxis]
u = np.exp(x - m)
z = u.sum(1)[:, np.newaxis]
return u / z
def testSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
y_np = self._softmax(x_np)
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.softmax_v2(x_tf)
y_tf_last_dim = nn_ops.softmax_v2(x_tf, 1)
y_tf_np = self.evaluate(y_tf)
y_tf_last_dim_np = self.evaluate(y_tf_last_dim)
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
self.assertAllClose(y_tf_last_dim_np, y_np, eps)
def testSoftmaxAxes(self):
arr = np.linspace(0., 1, 12).reshape(3, 4)
x_neg_axis = nn_ops.softmax_v2(arr, axis=-2)
y_pos_axis = nn_ops.softmax_v2(arr, axis=0)
z_gt_axis = nn_ops.softmax_v2(arr, axis=0)
x_neg_axis_tf = self.evaluate(x_neg_axis)
y_pos_axis_tf = self.evaluate(y_pos_axis)
z_gt_axis_tf = self.evaluate(z_gt_axis)
eps = 1e-3
self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)
def testSoftmaxExtendType(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
x_f32_tf = constant_op.constant(x_np)
x_bf16_tf = math_ops.cast(x_f32_tf, dtypes.bfloat16)
y_f32_tf = self.evaluate(nn_ops.softmax(x_f32_tf))
y_bf16_tf = self.evaluate(nn_ops.softmax(x_bf16_tf))
expected = math_ops.cast(y_f32_tf, dtypes.bfloat16)
tol = x_shape[1] * 1e-3
self.assertAllClose(y_bf16_tf, expected, rtol=tol, atol=tol)
@parameterized.parameters(((5, 10),), ((2, 3, 4),))
def testGradient(self, x_shape):
x_np = np.random.randn(*x_shape).astype(np.float64)
x_tf = constant_op.constant(x_np)
theoretical, numerical = gradient_checker_v2.compute_gradient(
nn_ops.softmax_v2, [x_tf])
self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
| SoftmaxTest |
python | getsentry__sentry | src/sentry/preprod/analytics.py | {
"start": 1751,
"end": 1970
} | class ____(analytics.Event):
organization_id: int
project_id: int
user_id: int | None = None
artifact_count: int
@analytics.eventclass("preprod_artifact.api.delete")
| PreprodArtifactApiAdminBatchDeleteEvent |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 35725,
"end": 37148
} | class ____:
""" Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
"""
testcase = ma.fix_invalid([1,2,3,4,np.nan])
def test_sem(self):
# This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
y = mstats.sem(self.testcase)
assert_almost_equal(y, 0.6454972244)
n = self.testcase.count()
assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
mstats.sem(self.testcase, ddof=2))
def test_zmap(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zmap(self.testcase, self.testcase)
desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired_unmaskedvals,
y.data[y.mask == False], decimal=12) # noqa: E712
def test_zscore(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zscore(self.testcase)
desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999, np.nan])
assert_almost_equal(desired, y, decimal=12)
@skip_xp_invalid_arg
| TestVariability |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.