language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 150748,
"end": 152886
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[5]"):
l_x_ = L_x_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
_wrap_for_grad: "f32[5]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
child: "f32[5]" = torch._functorch.eager_transforms._set_tensor_requires_grad(_wrap_for_grad); child = None
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
sin: "f32[5]" = _wrap_for_grad.sin(); _wrap_for_grad = None
primals_out: "f32[]" = sin.sum(); sin = None
results: "f32[]" = torch._C._functorch._unwrap_for_grad(primals_out, 1); primals_out = None
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
return (results,)
""",
)
def test_vjp_multiple_outputs(self):
counters.clear()
def wrapper_fn(x, v):
fn = lambda x: (x.sin(), x.cos()) # noqa: E731
(out, vjpfunc) = torch.func.vjp(fn, x)
vjps = vjpfunc((v, v))
return out, vjps
x = torch.randn([5])
v = torch.randn(5)
wrapped_gm = self._compile_check(wrapper_fn, (x, v))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 8486,
"end": 8771
} | class ____(DagsterError):
"""Thrown when a run cannot be found in run storage."""
def __init__(self, *args, **kwargs):
self.invalid_run_id = check.str_param(kwargs.pop("invalid_run_id"), "invalid_run_id")
super().__init__(*args, **kwargs)
| DagsterRunNotFoundError |
python | tensorflow__tensorflow | tensorflow/python/distribute/vars_test.py | {
"start": 19315,
"end": 28183
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(ms_combination)
def testScatterSub(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_sub():
ctx = distribute_lib.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops_stack.stack([
math_ops.cast(replica_id, dtypes.float32),
math_ops.cast(replica_id + 1, dtypes.float32)
]),
indices=array_ops_stack.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_sub(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_sub)))
self.assertAllEqual([[0., -1., -1.], [0., -1., -1.]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterAdd(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_add():
ctx = distribute_lib.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops_stack.stack([replica_id, replica_id + 1]),
indices=array_ops_stack.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_add(value)
per_replica_results = self.evaluate(
test_util.gather(distribution, distribution.run(scatter_add)))
self.assertAllEqual([[0, 2, 2], [0, 2, 2]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterDiv(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[1, 6, 1], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_div():
ctx = distribute_lib.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(replica_id + 2, [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_div(value)
per_replica_results = self.evaluate(
test_util.gather(distribution, distribution.run(scatter_div)))
self.assertAllEqual([[0, 2, 1], [0, 2, 1]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMul(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_mul():
ctx = distribute_lib.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(
math_ops.cast(replica_id + 2, dtypes.float32), [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_mul(value)
per_replica_results = self.evaluate(
test_util.gather(distribution, distribution.run(scatter_mul)))
self.assertAllClose([[2., 1.5, 1.], [2., 1.5, 1.]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMin(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 2, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 2, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_min(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_min(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_min.*"):
self.evaluate(
test_util.gather(distribution,
distribution.run(scatter_min, args=(v1,))))
per_replica_results = self.evaluate(
test_util.gather(distribution,
distribution.run(scatter_min, args=(v2,))))
self.assertAllClose([[0, 1, 0], [0, 1, 0]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMax(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_max(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([0]),
dense_shape=(3,))
return v.scatter_max(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_max.*"):
self.evaluate(
test_util.gather(distribution,
distribution.run(scatter_max, args=(v1,))))
per_replica_results = self.evaluate(
test_util.gather(distribution,
distribution.run(scatter_max, args=(v2,))))
self.assertAllClose([[1, 0, 0], [1, 0, 0]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterUpdate(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_update(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([3]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_update(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_update.*"):
self.evaluate(
test_util.gather(distribution,
distribution.run(scatter_update, args=(v1,))))
per_replica_results = self.evaluate(
test_util.gather(distribution,
distribution.run(scatter_update, args=(v2,))))
self.assertAllClose([[0, 3, 0], [0, 3, 0]], per_replica_results)
@combinations.generate(ms_combination + tpu_combination)
def testScatterOpsWithNoneAggregation(self, distribution):
config.disable_mlir_bridge()
def assert_close(v, op, delta, expect):
scatter_op = getattr(v, op)
@def_function.function
def scatter_xxx():
return scatter_op(delta)
per_replica_results = self.evaluate(
variable_utils.convert_variables_to_tensors(
distribution.experimental_local_results(
distribution.run(scatter_xxx))))
self.assertAllClose([expect, expect], per_replica_results)
with distribution.scope():
v = variables_lib.Variable(
[4.], aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
delta = indexed_slices.IndexedSlices(
values=array_ops.identity([2.]),
indices=array_ops.identity([0]),
dense_shape=(1,))
assert_close(v, "scatter_sub", delta, [2.])
assert_close(v, "scatter_add", delta, [4.])
assert_close(v, "scatter_max", delta, [4.])
assert_close(v, "scatter_min", delta, [2.])
assert_close(v, "scatter_mul", delta, [4.])
assert_close(v, "scatter_div", delta, [2.])
assert_close(v, "scatter_update", delta, [2.])
@combinations.generate(ms_combination + tpu_combination)
def testScatterOpsInCrossReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[1, 1, 1], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable([1, 1, 1])
self.evaluate(variables_lib.global_variables_initializer())
value = indexed_slices.IndexedSlices(
values=array_ops.identity([2]),
indices=array_ops.identity([0]),
dense_shape=(3,))
with distribution.scope():
self.evaluate(v1.scatter_add(value))
self.assertAllEqual([3, 1, 1], self.evaluate(v1.read_value()))
self.evaluate(v2.scatter_min(value))
self.assertAllEqual([1, 1, 1], self.evaluate(v2.read_value()))
| OnWriteVariableSyncScatterTests |
python | RaRe-Technologies__gensim | gensim/similarities/docsim.py | {
"start": 29340,
"end": 34581
} | class ____(interfaces.SimilarityABC):
"""Compute cosine similarity against a corpus of documents by storing the index matrix in memory.
Unless the entire matrix fits into main memory, use :class:`~gensim.similarities.docsim.Similarity` instead.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.similarities import MatrixSimilarity
>>>
>>> query = [(1, 2), (5, 4)]
>>> index = MatrixSimilarity(common_corpus, num_features=len(common_dictionary))
>>> sims = index[query]
"""
def __init__(self, corpus, num_best=None, dtype=numpy.float32, num_features=None, chunksize=256, corpus_len=None):
"""
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in streamed Gensim bag-of-words format.
num_best : int, optional
If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
Otherwise, return a full vector with one float for every document in the index.
num_features : int
Size of the dictionary (number of features).
corpus_len : int, optional
Number of documents in `corpus`. If not specified, will scan the corpus to determine the matrix size.
chunksize : int, optional
Size of query chunks. Used internally when the query is an entire corpus.
dtype : numpy.dtype, optional
Datatype to store the internal matrix in.
"""
if num_features is None:
logger.warning(
"scanning corpus to determine the number of features (consider setting `num_features` explicitly)"
)
num_features = 1 + utils.get_max_id(corpus)
self.num_features = num_features
self.num_best = num_best
self.normalize = True
self.chunksize = chunksize
if corpus_len is None:
corpus_len = len(corpus)
if corpus is not None:
if self.num_features <= 0:
raise ValueError(
"cannot index a corpus with zero features (you must specify either `num_features` "
"or a non-empty corpus in the constructor)"
)
logger.info("creating matrix with %i documents and %i features", corpus_len, num_features)
self.index = numpy.empty(shape=(corpus_len, num_features), dtype=dtype)
# iterate over corpus, populating the numpy index matrix with (normalized)
# document vectors
for docno, vector in enumerate(corpus):
if docno % 1000 == 0:
logger.debug("PROGRESS: at document #%i/%i", docno, corpus_len)
# individual documents in fact may be in numpy.scipy.sparse format as well.
# it's not documented because other it's not fully supported throughout.
# the user better know what he's doing (no normalization, must
# explicitly supply num_features etc).
if isinstance(vector, numpy.ndarray):
pass
elif scipy.sparse.issparse(vector):
vector = vector.toarray().flatten()
else:
vector = matutils.unitvec(matutils.sparse2full(vector, num_features))
self.index[docno] = vector
def __len__(self):
return self.index.shape[0]
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly, use the :class:`~gensim.similarities.docsim.MatrixSimilarity.__getitem__`
instead.
Parameters
----------
query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix.
"""
is_corpus, query = utils.is_corpus(query)
if is_corpus:
query = numpy.asarray(
[matutils.sparse2full(vec, self.num_features) for vec in query],
dtype=self.index.dtype
)
else:
if scipy.sparse.issparse(query):
query = query.toarray() # convert sparse to dense
elif isinstance(query, numpy.ndarray):
pass
else:
# default case: query is a single vector in sparse gensim format
query = matutils.sparse2full(query, self.num_features)
query = numpy.asarray(query, dtype=self.index.dtype)
# do a little transposition dance to stop numpy from making a copy of
# self.index internally in numpy.dot (very slow).
result = numpy.dot(self.index, query.T).T # return #queries x #index
return result # XXX: removed casting the result from array to list; does anyone care?
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.index.shape[1])
| MatrixSimilarity |
python | docker__docker-py | docker/errors.py | {
"start": 4659,
"end": 4843
} | class ____(DockerException):
def __init__(self, param):
self.param = param
def __str__(self):
return (f"missing parameter: {self.param}")
| MissingContextParameter |
python | django__django | tests/multiple_database/tests.py | {
"start": 49453,
"end": 50684
} | class ____(SimpleTestCase):
@override_settings(
DATABASE_ROUTERS=[
"multiple_database.tests.TestRouter",
"multiple_database.tests.WriteRouter",
]
)
def test_router_init_default(self):
connection_router = ConnectionRouter()
self.assertEqual(
[r.__class__.__name__ for r in connection_router.routers],
["TestRouter", "WriteRouter"],
)
def test_router_init_arg(self):
connection_router = ConnectionRouter(
[
"multiple_database.tests.TestRouter",
"multiple_database.tests.WriteRouter",
]
)
self.assertEqual(
[r.__class__.__name__ for r in connection_router.routers],
["TestRouter", "WriteRouter"],
)
# Init with instances instead of strings
connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
self.assertEqual(
[r.__class__.__name__ for r in connection_router.routers],
["TestRouter", "WriteRouter"],
)
# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
| ConnectionRouterTestCase |
python | Lightning-AI__lightning | src/lightning/fabric/utilities/load.py | {
"start": 1358,
"end": 6092
} | class ____:
def __init__(
self,
metatensor: Tensor,
archiveinfo: "_LazyLoadingUnpickler",
storageinfo: tuple,
rebuild_args: tuple,
) -> None:
self.metatensor = metatensor
self.archiveinfo = archiveinfo
self.storageinfo = storageinfo
self.rebuild_args = rebuild_args
@classmethod
def rebuild_from_type_v2(
cls,
func: Callable,
new_type: _TensorMeta,
args: tuple,
state: dict,
*,
archiveinfo: Optional["_LazyLoadingUnpickler"] = None,
) -> Any:
ret = func(*args)
if isinstance(ret, _NotYetLoadedTensor):
old_lt = ret._load_tensor
def _load_tensor() -> Any:
t = old_lt()
return torch._tensor._rebuild_from_type_v2(lambda: t, new_type, (), state)
ret._load_tensor = _load_tensor # type: ignore[method-assign]
return ret
return torch._tensor._rebuild_from_type_v2(func, new_type, args, state)
@classmethod
def rebuild_parameter(
cls,
data: Any,
requires_grad: bool,
backward_hooks: OrderedDict,
*,
archiveinfo: Optional["_LazyLoadingUnpickler"] = None,
) -> Union[Tensor, "_NotYetLoadedTensor"]:
if isinstance(data, _NotYetLoadedTensor):
old_lt = data._load_tensor
def _load_tensor() -> Parameter:
t = old_lt()
return torch._utils._rebuild_parameter(t, requires_grad, backward_hooks)
data._load_tensor = _load_tensor # type: ignore[method-assign]
return data
return torch._utils._rebuild_parameter(data, requires_grad, backward_hooks)
@classmethod
def rebuild_tensor_v2(
cls,
storage: "TypedStorage",
storage_offset: int,
size: tuple,
stride: tuple,
requires_grad: bool,
backward_hooks: OrderedDict,
metadata: Optional[Any] = None,
*,
archiveinfo: "_LazyLoadingUnpickler",
) -> "_NotYetLoadedTensor":
rebuild_args = (storage_offset, size, stride, requires_grad, backward_hooks, metadata)
metatensor = torch._utils._rebuild_tensor_v2(
storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata
)
storageinfo = storage.archiveinfo
return _NotYetLoadedTensor(metatensor, archiveinfo, storageinfo, rebuild_args)
def _load_tensor(self) -> Tensor:
from torch.storage import TypedStorage, UntypedStorage
_, _, fn, _, size = self.storageinfo
dtype = self.metatensor.dtype
storage = self.archiveinfo.file_reader.get_storage_from_record(
f"data/{fn}", size * torch._utils._element_size(dtype), UntypedStorage
)
uts = storage._typed_storage()._untyped_storage
with warnings.catch_warnings():
# The TypedStorage APIs have heavy deprecations in torch, suppress all these warnings for now
warnings.simplefilter("ignore")
storage = TypedStorage(wrap_storage=uts, dtype=dtype, _internal=True)
return torch._utils._rebuild_tensor_v2(storage, *self.rebuild_args)
@classmethod
def __torch_function__(
cls,
func: Callable,
types: Sequence,
args: Sequence[Any] = (),
kwargs: Optional[dict] = None,
) -> Any:
kwargs = kwargs or {}
loaded_args = [(arg._load_tensor() if isinstance(arg, _NotYetLoadedTensor) else arg) for arg in args]
return func(*loaded_args, **kwargs)
@property
def device(self) -> torch.device:
return torch.device(self.storageinfo[3])
def __getattr__(self, name: str) -> Any:
# These properties don't require materialization and can be accessed through the meta tensor directly
if name in {
"dtype",
"grad",
"grad_fn",
"is_meta",
"layout",
"names",
"ndim",
"output_nr",
"requires_grad",
"retains_grad",
"size",
"shape",
"volatile",
}:
return getattr(self.metatensor, name)
# materializing these is needed for quantization (see lit-gpt)
if name in {"contiguous", "cuda", "half", "data", "to"}:
return getattr(self._load_tensor(), name)
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
def __repr__(self) -> str:
return f"{self.__class__.__name__}({repr(self.metatensor)})"
# Modified from https://github.com/lernapparat/torchhacks by Thomas Viehmann
| _NotYetLoadedTensor |
python | python-attrs__attrs | src/attr/validators.py | {
"start": 15915,
"end": 16719
} | class ____:
min_length = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if len(value) < self.min_length:
msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}"
raise ValueError(msg)
def __repr__(self):
return f"<min_len validator for {self.min_length}>"
def min_len(length):
"""
A validator that raises `ValueError` if the initializer is called
with a string or iterable that is shorter than *length*.
Args:
length (int): Minimum length of the string or iterable
.. versionadded:: 22.1.0
"""
return _MinLengthValidator(length)
@attrs(repr=False, slots=True, unsafe_hash=True)
| _MinLengthValidator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 141414,
"end": 145195
} | class ____(DataplexCatalogBaseOperator):
"""
List AspectType resources.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogListAspectTypesOperator`
:param filter_by: Optional. Filter to apply on the list results.
:param order_by: Optional. Fields to order the results by.
:param page_size: Optional. Maximum number of AspectTypes to return on the page.
:param page_token: Optional. Token to retrieve the next page of results.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(DataplexCatalogBaseOperator.template_fields)
operator_extra_links = (DataplexCatalogAspectTypesLink(),)
def __init__(
self,
page_size: int | None = None,
page_token: str | None = None,
filter_by: str | None = None,
order_by: str | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.page_size = page_size
self.page_token = page_token
self.filter_by = filter_by
self.order_by = order_by
def execute(self, context: Context):
DataplexCatalogAspectTypesLink.persist(context=context)
self.log.info(
"Listing Dataplex Catalog AspectType from location %s.",
self.location,
)
try:
aspect_type_on_page = self.hook.list_aspect_types(
location=self.location,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
filter_by=self.filter_by,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("AspectType on page: %s", aspect_type_on_page)
context["ti"].xcom_push(
key="aspect_type_page",
value=ListAspectTypesResponse.to_dict(aspect_type_on_page._response),
)
except Exception as ex:
raise AirflowException(ex)
# Constructing list to return AspectTypes in readable format
aspect_types_list = [
MessageToDict(aspect_type._pb, preserving_proto_field_name=True)
for aspect_type in next(iter(aspect_type_on_page.pages)).aspect_types
]
return aspect_types_list
| DataplexCatalogListAspectTypesOperator |
python | prabhupant__python-ds | data_structures/graphs/print_all_paths_between_nodes.py | {
"start": 37,
"end": 1039
} | class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
def print_path(self, s, d, visited, path):
visited[s] = True
path.append(s)
if s == d:
print(path)
else:
for i in self.graph[s]:
if visited[i] == False:
self.print_path(i, d, visited, path)
# If path from this node does not lead to the destination, remove it
# from the path stack and mark it as not visited
path.pop()
visited[s] = False
def print_all_paths(self, s, d):
visited = [False] * self.vertices
path = []
self.print_path(s, d, visited, path)
g = Graph(4)
g.add_edge(0, 3)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(2, 0)
g.add_edge(2, 1)
s = 2
d = 3
print(f'Paths from {s} to {d} are - ')
g.print_all_paths(s, d) | Graph |
python | google__jax | jax/_src/scipy/optimize/minimize.py | {
"start": 884,
"end": 4954
} | class ____(NamedTuple):
"""Object holding optimization results.
Parameters:
x: final solution.
success: ``True`` if optimization succeeded.
status: integer solver specific return code. 0 means converged (nominal),
1=max BFGS iters reached, 3=zoom failed, 4=saddle point reached,
5=max line search iters reached, -1=undefined
fun: final function value.
jac: final jacobian array.
hess_inv: final inverse Hessian estimate.
nfev: integer number of function calls used.
njev: integer number of gradient evaluations.
nit: integer number of iterations of the optimization algorithm.
"""
x: Array
success: bool | Array
status: int | Array
fun: Array
jac: Array
hess_inv: Array | None
nfev: int | Array
njev: int | Array
nit: int | Array
def minimize(
fun: Callable,
x0: Array,
args: tuple = (),
*,
method: str,
tol: float | None = None,
options: Mapping[str, Any] | None = None,
) -> OptimizeResults:
"""Minimization of scalar function of one or more variables.
This API for this function matches SciPy with some minor deviations:
- Gradients of ``fun`` are calculated automatically using JAX's autodiff
support when required.
- The ``method`` argument is required. You must specify a solver.
- Various optional arguments in the SciPy interface have not yet been
implemented.
- Optimization results may differ from SciPy due to differences in the line
search implementation.
``minimize`` supports :func:`~jax.jit` compilation. It does not yet support
differentiation or arguments in the form of multi-dimensional arrays, but
support for both is planned.
Args:
fun: the objective function to be minimized, ``fun(x, *args) -> float``,
where ``x`` is a 1-D array with shape ``(n,)`` and ``args`` is a tuple
of the fixed parameters needed to completely specify the function.
``fun`` must support differentiation.
x0: initial guess. Array of real elements of size ``(n,)``, where ``n`` is
the number of independent variables.
args: extra arguments passed to the objective function.
method: solver type. Currently only ``"BFGS"`` is supported.
tol: tolerance for termination. For detailed control, use solver-specific
options.
options: a dictionary of solver options. All methods accept the following
generic options:
- maxiter (int): Maximum number of iterations to perform. Depending on the
method each iteration may use several function evaluations.
Returns:
An :class:`OptimizeResults` object.
"""
if options is None:
options = {}
if not isinstance(args, tuple):
msg = "args argument to jax.scipy.optimize.minimize must be a tuple, got {}"
raise TypeError(msg.format(args))
fun_with_args = lambda x: fun(x, *args)
if method.lower() == 'bfgs':
results = minimize_bfgs(fun_with_args, x0, **options)
success = results.converged & jnp.logical_not(results.failed)
return OptimizeResults(x=results.x_k,
success=success,
status=results.status,
fun=results.f_k,
jac=results.g_k,
hess_inv=results.H_k,
nfev=results.nfev,
njev=results.ngev,
nit=results.k)
if method.lower() == 'l-bfgs-experimental-do-not-rely-on-this':
results = _minimize_lbfgs(fun_with_args, x0, **options)
success = results.converged & jnp.logical_not(results.failed)
return OptimizeResults(x=results.x_k,
success=success,
status=results.status,
fun=results.f_k,
jac=results.g_k,
hess_inv=None,
nfev=results.nfev,
njev=results.ngev,
nit=results.k)
raise ValueError(f"Method {method} not recognized")
| OptimizeResults |
python | getsentry__sentry | src/sentry/plugins/base/structs.py | {
"start": 228,
"end": 615
} | class ____:
def __init__(self, event, rule=None, rules=None):
if rule and not rules:
rules = [rule]
self.event = event
self.rules = rules or []
@property
def rule(self):
warnings.warn(
"Notification.rule is deprecated. Switch to Notification.rules.", DeprecationWarning
)
return self.rules[0]
| Notification |
python | mlflow__mlflow | mlflow/sklearn/__init__.py | {
"start": 20478,
"end": 21548
} | class ____:
_SUPPORTED_CUSTOM_PREDICT_FN = [
"predict_proba",
"predict_log_proba",
"predict_joint_log_proba",
"score",
]
def __init__(self, sklearn_model):
self.sklearn_model = sklearn_model
# Patch the model with custom predict functions that can be specified
# via `pyfunc_predict_fn` argument when saving or logging.
for predict_fn in self._SUPPORTED_CUSTOM_PREDICT_FN:
if fn := getattr(self.sklearn_model, predict_fn, None):
setattr(self, predict_fn, fn)
def get_raw_model(self):
"""
Returns the underlying scikit-learn model.
"""
return self.sklearn_model
def predict(
self,
data,
params: dict[str, Any] | None = None,
):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions.
"""
return self.sklearn_model.predict(data)
| _SklearnModelWrapper |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 4178,
"end": 4418
} | class ____:
def __new__() -> InvalidButPluginDoesNotCrash:
...
def __enter__() -> InvalidButPluginDoesNotCrash:
...
async def __aenter__() -> InvalidButPluginDoesNotCrash:
...
| InvalidButPluginDoesNotCrash |
python | sympy__sympy | sympy/stats/crv.py | {
"start": 1290,
"end": 1554
} | class ____(RandomDomain):
"""
A domain with continuous support
Represented using symbols and Intervals.
"""
is_Continuous = True
def as_boolean(self):
raise NotImplementedError("Not Implemented for generic Domains")
| ContinuousDomain |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_nudge.py | {
"start": 326,
"end": 1542
} | class ____(SlackActivityNotificationTest):
@responses.activate
def test_nudge_block(self) -> None:
notification = IntegrationNudgeNotification(
self.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
seed=SEED,
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == MESSAGE_LIBRARY[SEED].format(provider="Slack")
assert blocks[0]["text"]["text"] == fallback_text
assert len(blocks[1]["elements"]) == 1
assert blocks[1]["elements"][0]["action_id"] == "enable_notifications"
assert blocks[1]["elements"][0]["text"]["text"] == "Turn on personal notifications"
assert blocks[1]["elements"][0]["value"] == "all_slack"
# Slack requires callback_id to handle enablement
context_params = orjson.loads(
orjson.loads(self.mock_post.call_args.kwargs["blocks"])[0]["block_id"]
)
assert context_params == {"enable_notifications": True}
| SlackNudgeNotificationTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 65332,
"end": 65664
} | class ____(sgqlc.types.Enum):
"""The layout of a project v2 view.
Enumeration Choices:
* `BOARD_LAYOUT`: Board layout
* `ROADMAP_LAYOUT`: Roadmap layout
* `TABLE_LAYOUT`: Table layout
"""
__schema__ = github_schema
__choices__ = ("BOARD_LAYOUT", "ROADMAP_LAYOUT", "TABLE_LAYOUT")
| ProjectV2ViewLayout |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 107708,
"end": 107866
} | class ____(InvalidPackageOpError):
"""Raised when attempting an invalid operation on a package that requires a manual download."""
| ManualDownloadRequiredError |
python | Pylons__pyramid | tests/test_util.py | {
"start": 12174,
"end": 12931
} | class ____(unittest.TestCase):
def _callFUT(self, *args, **kw):
from pyramid.util import strings_differ
return strings_differ(*args, **kw)
def test_it_bytes(self):
self.assertFalse(self._callFUT(b'foo', b'foo'))
self.assertTrue(self._callFUT(b'123', b'345'))
self.assertTrue(self._callFUT(b'1234', b'123'))
self.assertTrue(self._callFUT(b'123', b'1234'))
def test_it_native_str(self):
self.assertFalse(self._callFUT('123', '123'))
self.assertTrue(self._callFUT('123', '1234'))
def test_it(self):
result = self._callFUT(b'foo', b'foo')
self.assertFalse(result)
result = self._callFUT(b'123', b'abc')
self.assertTrue(result)
| Test_strings_differ |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_looker.py | {
"start": 2000,
"end": 5969
} | class ____(LookerTestBase):
@mock.patch(OPERATOR_PATH.format("LookerHook"))
def test_execute(self, mock_hook):
# mock return vals from hook
mock_hook.return_value.start_pdt_build.return_value.materialization_id = TEST_JOB_ID
mock_hook.return_value.wait_for_job.return_value = None
# run task in mock context (asynchronous=False)
task = LookerStartPdtBuildOperator(
task_id=TASK_ID,
looker_conn_id=LOOKER_CONN_ID,
model=MODEL,
view=VIEW,
)
task.execute(context=self.mock_context)
# assertions
# hook's constructor called once
mock_hook.assert_called_once_with(looker_conn_id=LOOKER_CONN_ID)
# hook.start_pdt_build called once
mock_hook.return_value.start_pdt_build.assert_called_once_with(
model=MODEL,
view=VIEW,
query_params=None,
)
# hook.wait_for_job called once
mock_hook.return_value.wait_for_job.assert_called_once_with(
materialization_id=TEST_JOB_ID,
wait_time=10,
timeout=None,
)
@mock.patch(OPERATOR_PATH.format("LookerHook"))
def test_execute_async(self, mock_hook):
# mock return vals from hook
mock_hook.return_value.start_pdt_build.return_value.materialization_id = TEST_JOB_ID
mock_hook.return_value.wait_for_job.return_value = None
# run task in mock context (asynchronous=True)
task = LookerStartPdtBuildOperator(
task_id=TASK_ID,
looker_conn_id=LOOKER_CONN_ID,
model=MODEL,
view=VIEW,
asynchronous=True,
)
task.execute(context=self.mock_context)
# assertions
# hook's constructor called once
mock_hook.assert_called_once_with(looker_conn_id=LOOKER_CONN_ID)
# hook.start_pdt_build called once
mock_hook.return_value.start_pdt_build.assert_called_once_with(
model=MODEL,
view=VIEW,
query_params=None,
)
# hook.wait_for_job NOT called
mock_hook.return_value.wait_for_job.assert_not_called()
@mock.patch(OPERATOR_PATH.format("LookerHook"))
def test_on_kill(self, mock_hook):
# mock return vals from hook
mock_hook.return_value.start_pdt_build.return_value.materialization_id = TEST_JOB_ID
mock_hook.return_value.wait_for_job.return_value = None
# run task in mock context (cancel_on_kill=False)
task = LookerStartPdtBuildOperator(
task_id=TASK_ID,
looker_conn_id=LOOKER_CONN_ID,
model=MODEL,
view=VIEW,
cancel_on_kill=False,
)
task.execute(context=self.mock_context)
# kill and assert build is NOT canceled
task.on_kill()
mock_hook.return_value.stop_pdt_build.assert_not_called()
# alternatively, kill and assert build is canceled
task.cancel_on_kill = True
task.on_kill()
mock_hook.return_value.stop_pdt_build.assert_called_once_with(materialization_id=TEST_JOB_ID)
@mock.patch(OPERATOR_PATH.format("LookerHook"))
def test_materialization_id_returned_as_empty_str(self, mock_hook):
# mock return vals from hook
mock_hook.return_value.start_pdt_build.return_value.materialization_id = ""
mock_hook.return_value.wait_for_job.return_value = None
# run task in mock context (asynchronous=False)
task = LookerStartPdtBuildOperator(
task_id=TASK_ID,
looker_conn_id=LOOKER_CONN_ID,
model=MODEL,
view=VIEW,
)
# check AirflowException is raised
with pytest.raises(
AirflowException, match=f"No `materialization_id` was returned for model: {MODEL}, view: {VIEW}."
):
task.execute(context=self.mock_context)
| TestLookerStartPdtBuildOperator |
python | getsentry__sentry | tests/apidocs/endpoints/scim/test_group_index.py | {
"start": 184,
"end": 1197
} | class ____(APIDocsTestCase, SCIMTestCase):
def setUp(self) -> None:
super().setUp()
self.member = self.create_member(user=self.create_user(), organization=self.organization)
self.team = self.create_team(organization=self.organization, members=[self.user])
self.url = reverse(
"sentry-api-0-organization-scim-team-index",
kwargs={"organization_id_or_slug": self.organization.slug},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_post(self) -> None:
post_data = {
"schemas": ["urn:ietf:params:scim:schemas:core:2.0:Group"],
"displayName": "Test SCIMv2",
"members": [],
}
response = self.client.post(self.url, post_data)
request = RequestFactory().post(self.url, post_data)
self.validate_schema(request, response)
| SCIMTeamIndexDocs |
python | tornadoweb__tornado | tornado/test/netutil_test.py | {
"start": 5331,
"end": 5463
} | class ____(_ResolverTestMixin):
def setUp(self):
super().setUp()
self.resolver = CaresResolver()
| CaresResolverTest |
python | django__django | tests/backends/base/test_base.py | {
"start": 468,
"end": 3830
} | class ____(SimpleTestCase):
def test_repr(self):
conn = connections[DEFAULT_DB_ALIAS]
self.assertEqual(
repr(conn),
f"<DatabaseWrapper vendor={connection.vendor!r} alias='default'>",
)
def test_initialization_class_attributes(self):
"""
The "initialization" class attributes like client_class and
creation_class should be set on the class and reflected in the
corresponding instance attributes of the instantiated backend.
"""
conn = connections[DEFAULT_DB_ALIAS]
conn_class = type(conn)
attr_names = [
("client_class", "client"),
("creation_class", "creation"),
("features_class", "features"),
("introspection_class", "introspection"),
("ops_class", "ops"),
("validation_class", "validation"),
]
for class_attr_name, instance_attr_name in attr_names:
class_attr_value = getattr(conn_class, class_attr_name)
self.assertIsNotNone(class_attr_value)
instance_attr_value = getattr(conn, instance_attr_name)
self.assertIsInstance(instance_attr_value, class_attr_value)
def test_initialization_display_name(self):
self.assertEqual(BaseDatabaseWrapper.display_name, "unknown")
self.assertNotEqual(connection.display_name, "unknown")
def test_get_database_version(self):
with patch.object(BaseDatabaseWrapper, "__init__", return_value=None):
msg = (
"subclasses of BaseDatabaseWrapper may require a "
"get_database_version() method."
)
with self.assertRaisesMessage(NotImplementedError, msg):
BaseDatabaseWrapper().get_database_version()
def test_check_database_version_supported_with_none_as_database_version(self):
with patch.object(connection.features, "minimum_database_version", None):
connection.check_database_version_supported()
def test_release_memory_without_garbage_collection(self):
# Schedule the restore of the garbage collection settings.
self.addCleanup(gc.set_debug, 0)
self.addCleanup(gc.enable)
# Disable automatic garbage collection to control when it's triggered,
# then run a full collection cycle to ensure `gc.garbage` is empty.
gc.disable()
gc.collect()
# The garbage list isn't automatically populated to avoid CPU overhead,
# so debugging needs to be enabled to track all unreachable items and
# have them stored in `gc.garbage`.
gc.set_debug(gc.DEBUG_SAVEALL)
# Create a new connection that will be closed during the test, and also
# ensure that a `DatabaseErrorWrapper` is created for this connection.
test_connection = connection.copy()
with test_connection.wrap_database_errors:
self.assertEqual(test_connection.queries, [])
# Close the connection and remove references to it. This will mark all
# objects related to the connection as garbage to be collected.
test_connection.close()
test_connection = None
# Enforce garbage collection to populate `gc.garbage` for inspection.
gc.collect()
self.assertEqual(gc.garbage, [])
| DatabaseWrapperTests |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 2411,
"end": 2607
} | class ____(Value):
combinable = False
def __init__(self, *tags):
self.tags = tags
def to_string(self):
return "{" + " | ".join(str(t) for t in self.tags) + "}"
| TagValue |
python | tensorflow__tensorflow | tensorflow/python/framework/errors_impl.py | {
"start": 14692,
"end": 15171
} | class ____(OpError):
"""Raised when an operation was aborted, typically due to a concurrent action.
For example, running a
`tf.queue.QueueBase.enqueue`
operation may raise `AbortedError` if a
`tf.queue.QueueBase.close` operation
previously ran.
"""
def __init__(self, node_def, op, message, *args):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED, *args)
@tf_export("errors.OutOfRangeError")
| AbortedError |
python | astropy__astropy | astropy/modeling/tests/test_spline.py | {
"start": 880,
"end": 11678
} | class ____:
def setup_class(self):
self.num_opt = 3
self.optional_inputs = {f"test{i}": mk.MagicMock() for i in range(self.num_opt)}
self.extra_kwargs = {f"new{i}": mk.MagicMock() for i in range(self.num_opt)}
class Spline(_Spline):
optional_inputs = {"test": "test"}
def _init_parameters(self):
super()._init_parameters()
def _init_data(self, knots, coeffs, bounds=None):
super()._init_data(knots, coeffs, bounds=bounds)
self.Spline = Spline
def test___init__(self):
# empty spline
spl = self.Spline()
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
assert not hasattr(spl, "degree")
# Call _init_spline
with mk.patch.object(_Spline, "_init_spline", autospec=True) as mkInit:
# No call (knots=None)
spl = self.Spline()
assert mkInit.call_args_list == []
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
assert mkInit.call_args_list == [mk.call(spl, knots, coeffs, bounds)]
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
# Coeffs but no knots
MESSAGE = r"If one passes a coeffs vector one needs to also pass knots!"
with pytest.raises(ValueError, match=MESSAGE):
self.Spline(coeffs=mk.MagicMock())
def test_param_names(self):
# no parameters
spl = self.Spline()
assert spl.param_names == ()
knot_names = tuple(mk.MagicMock() for _ in range(3))
spl._knot_names = knot_names
assert spl.param_names == knot_names
coeff_names = tuple(mk.MagicMock() for _ in range(3))
spl._coeff_names = coeff_names
assert spl.param_names == knot_names + coeff_names
def test__optional_arg(self):
spl = self.Spline()
assert spl._optional_arg("test") == "_test"
def test__create_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert hasattr(spl, attribute)
assert getattr(spl, attribute) is None
with pytest.raises(
ValueError, match=r"Optional argument .* already exists in this class!"
):
spl._create_optional_inputs()
def test__intercept_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
for arg in self.optional_inputs.keys():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
assert new_kwargs == self.extra_kwargs
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
new_kwargs = spl._intercept_optional_inputs(**kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is not None
assert getattr(spl, attribute) == kwargs[arg]
assert getattr(spl, attribute) != value
assert arg not in new_kwargs
assert new_kwargs == self.extra_kwargs
assert kwargs != self.extra_kwargs
with pytest.raises(
RuntimeError, match=r".* has already been set, something has gone wrong!"
):
spl._intercept_optional_inputs(**kwargs)
def test_evaluate(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
spl = Spline()
# No options passed in and No options set
new_kwargs = spl.evaluate(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
assert new_kwargs[arg] == value
for arg, value in self.extra_kwargs.items():
assert new_kwargs[arg] == value
assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
# No options passed in and Options set
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**kwargs)
new_kwargs = spl.evaluate(**self.extra_kwargs)
assert new_kwargs == kwargs
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
# Options passed in
set_kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**set_kwargs)
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
assert set_kwargs != kwargs
new_kwargs = spl.evaluate(**kwargs)
assert new_kwargs == kwargs
def test___call__(self):
spl = self.Spline()
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(
_Spline,
"_intercept_optional_inputs",
autospec=True,
return_value=new_kwargs,
) as mkIntercept:
with mk.patch.object(FittableModel, "__call__", autospec=True) as mkCall:
assert mkCall.return_value == spl(*args, **kwargs)
assert mkCall.call_args_list == [mk.call(spl, *args, **new_kwargs)]
assert mkIntercept.call_args_list == [mk.call(spl, **kwargs)]
def test__create_parameter(self):
np.random.seed(37)
base_vec = np.random.random(20)
test = base_vec.copy()
fixed_test = base_vec.copy()
class Spline(self.Spline):
@property
def test(self):
return test
@property
def fixed_test(self):
return fixed_test
spl = Spline()
assert (spl.test == test).all()
assert (spl.fixed_test == fixed_test).all()
for index in range(20):
name = f"test_name{index}"
spl._create_parameter(name, index, "test")
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is False
assert param.value == test[index] == spl.test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.test[index] == new_set
assert spl.test[index] != base_vec[index]
new_get = np.random.random()
spl.test[index] = new_get
assert param.value == new_get
assert param.value != new_set
for index in range(20):
name = f"fixed_test_name{index}"
spl._create_parameter(name, index, "fixed_test", True)
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is True
assert (
param.value
== fixed_test[index]
== spl.fixed_test[index]
== base_vec[index]
)
new_set = np.random.random()
param.value = new_set
assert spl.fixed_test[index] == new_set
assert spl.fixed_test[index] != base_vec[index]
new_get = np.random.random()
spl.fixed_test[index] = new_get
assert param.value == new_get
assert param.value != new_set
def test__create_parameters(self):
np.random.seed(37)
test = np.random.random(20)
class Spline(self.Spline):
@property
def test(self):
return test
spl = Spline()
fixed = mk.MagicMock()
with mk.patch.object(_Spline, "_create_parameter", autospec=True) as mkCreate:
params = spl._create_parameters("test_param", "test", fixed)
assert params == tuple(f"test_param{idx}" for idx in range(20))
assert mkCreate.call_args_list == [
mk.call(spl, f"test_param{idx}", idx, "test", fixed)
for idx in range(20)
]
def test__init_parameters(self):
spl = self.Spline()
MESSAGE = r"This needs to be implemented"
with pytest.raises(NotImplementedError, match=MESSAGE):
spl._init_parameters()
def test__init_data(self):
spl = self.Spline()
MESSAGE = r"This needs to be implemented"
with pytest.raises(NotImplementedError, match=MESSAGE):
spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
with pytest.raises(NotImplementedError, match=MESSAGE):
spl._init_data(mk.MagicMock(), mk.MagicMock())
def test__init_spline(self):
spl = self.Spline()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
with mk.patch.object(
_Spline, "_init_parameters", autospec=True
) as mkParameters:
with mk.patch.object(_Spline, "_init_data", autospec=True) as mkData:
main = mk.MagicMock()
main.attach_mock(mkParameters, "parameters")
main.attach_mock(mkData, "data")
spl._init_spline(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.data(spl, knots, coeffs, bounds=bounds),
mk.call.parameters(spl),
]
def test__init_tck(self):
spl = self.Spline()
assert spl._c is None
assert spl._t is None
assert spl._degree is None
spl = self.Spline(degree=4)
assert spl._c is None
assert spl._t is None
assert spl._degree == 4
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
| TestSpline |
python | getsentry__sentry | tests/sentry/integrations/slack/test_link_identity.py | {
"start": 6040,
"end": 7496
} | class ____(SlackIntegrationLinkIdentityTestBase):
def setUp(self) -> None:
super().setUp()
self.unlinking_url = build_unlinking_url(
self.integration.id,
self.external_id,
self.channel_id,
self.response_url,
)
def test_basic_flow(self) -> None:
# Load page.
response = self.client.get(self.unlinking_url)
assert response.status_code == 200
self.assertTemplateUsed(response, "sentry/auth-unlink-identity.html")
# Unlink identity of user.
response = self.client.post(self.unlinking_url)
assert response.status_code == 200
self.assertTemplateUsed(response, "sentry/integrations/slack/unlinked.html")
assert not Identity.objects.filter(external_id="new-slack-id", user=self.user).exists()
assert self.mock_webhook.call_count == 1
def test_user_with_multiple_organizations(self) -> None:
# Create a second organization where the user is _not_ a member.
self.create_organization_integration(
organization_id=self.create_organization(name="Another Org").id,
integration=self.integration,
)
# Unlink identity of user.
self.client.post(self.unlinking_url)
assert not Identity.objects.filter(external_id="new-slack-id", user=self.user).exists()
assert self.mock_webhook.call_count == 1
| SlackIntegrationUnlinkIdentityTest |
python | pandas-dev__pandas | pandas/core/computation/ops.py | {
"start": 4603,
"end": 7816
} | class ____:
"""
Hold an operator of arbitrary arity.
"""
op: str
def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None:
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = encoding
def __iter__(self) -> Iterator:
return iter(self.operands)
def __repr__(self) -> str:
"""
Print a generic n-ary operator and its operands using infix notation.
"""
# recurse over the operands
parened = (f"({pprint_thing(opr)})" for opr in self.operands)
return pprint_thing(f" {self.op} ".join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS):
return np.bool_
return result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self) -> bool:
types = self.operand_types
obj_dtype_set = frozenset([np.dtype("object")])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def is_scalar(self) -> bool:
return all(operand.is_scalar for operand in self.operands)
@property
def is_datetime(self) -> bool:
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""
Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""
Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in")
_cmp_ops_funcs = (
operator.gt,
operator.lt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
_in,
_not_in,
)
_cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs, strict=True))
BOOL_OPS_SYMS = ("&", "|", "and", "or")
_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_)
_bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs, strict=True))
ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%")
_arith_ops_funcs = (
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.pow,
operator.floordiv,
operator.mod,
)
_arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs, strict=True))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def is_term(obj) -> bool:
return isinstance(obj, Term)
| Op |
python | doocs__leetcode | solution/2400-2499/2470.Number of Subarrays With LCM Equal to K/Solution.py | {
"start": 0,
"end": 296
} | class ____:
def subarrayLCM(self, nums: List[int], k: int) -> int:
n = len(nums)
ans = 0
for i in range(n):
a = nums[i]
for b in nums[i:]:
x = lcm(a, b)
ans += x == k
a = x
return ans
| Solution |
python | mwaskom__seaborn | tests/_core/test_scales.py | {
"start": 548,
"end": 10290
} | class ____:
@pytest.fixture
def x(self):
return pd.Series([1, 3, 9], name="x", dtype=float)
def setup_ticks(self, x, *args, **kwargs):
s = Continuous().tick(*args, **kwargs)._setup(x, Coordinate())
a = PseudoAxis(s._matplotlib_scale)
a.set_view_interval(0, 1)
return a
def setup_labels(self, x, *args, **kwargs):
s = Continuous().label(*args, **kwargs)._setup(x, Coordinate())
a = PseudoAxis(s._matplotlib_scale)
a.set_view_interval(0, 1)
locs = a.major.locator()
return a, locs
def test_coordinate_defaults(self, x):
s = Continuous()._setup(x, Coordinate())
assert_series_equal(s(x), x)
def test_coordinate_transform(self, x):
s = Continuous(trans="log")._setup(x, Coordinate())
assert_series_equal(s(x), np.log10(x))
def test_coordinate_transform_with_parameter(self, x):
s = Continuous(trans="pow3")._setup(x, Coordinate())
assert_series_equal(s(x), np.power(x, 3))
def test_coordinate_transform_error(self, x):
s = Continuous(trans="bad")
with pytest.raises(ValueError, match="Unknown value provided"):
s._setup(x, Coordinate())
def test_interval_defaults(self, x):
s = Continuous()._setup(x, IntervalProperty())
assert_array_equal(s(x), [0, .25, 1])
def test_interval_with_range(self, x):
s = Continuous((1, 3))._setup(x, IntervalProperty())
assert_array_equal(s(x), [1, 1.5, 3])
def test_interval_with_norm(self, x):
s = Continuous(norm=(3, 7))._setup(x, IntervalProperty())
assert_array_equal(s(x), [-.5, 0, 1.5])
def test_interval_with_range_norm_and_transform(self, x):
x = pd.Series([1, 10, 100])
# TODO param order?
s = Continuous((2, 3), (10, 100), "log")._setup(x, IntervalProperty())
assert_array_equal(s(x), [1, 2, 3])
def test_interval_with_bools(self):
x = pd.Series([True, False, False])
s = Continuous()._setup(x, IntervalProperty())
assert_array_equal(s(x), [1, 0, 0])
def test_color_defaults(self, x):
cmap = color_palette("ch:", as_cmap=True)
s = Continuous()._setup(x, Color())
assert_array_equal(s(x), cmap([0, .25, 1])[:, :3]) # FIXME RGBA
def test_color_named_values(self, x):
cmap = color_palette("viridis", as_cmap=True)
s = Continuous("viridis")._setup(x, Color())
assert_array_equal(s(x), cmap([0, .25, 1])[:, :3]) # FIXME RGBA
def test_color_tuple_values(self, x):
cmap = color_palette("blend:b,g", as_cmap=True)
s = Continuous(("b", "g"))._setup(x, Color())
assert_array_equal(s(x), cmap([0, .25, 1])[:, :3]) # FIXME RGBA
def test_color_callable_values(self, x):
cmap = color_palette("light:r", as_cmap=True)
s = Continuous(cmap)._setup(x, Color())
assert_array_equal(s(x), cmap([0, .25, 1])[:, :3]) # FIXME RGBA
def test_color_with_norm(self, x):
cmap = color_palette("ch:", as_cmap=True)
s = Continuous(norm=(3, 7))._setup(x, Color())
assert_array_equal(s(x), cmap([-.5, 0, 1.5])[:, :3]) # FIXME RGBA
def test_color_with_transform(self, x):
x = pd.Series([1, 10, 100], name="x", dtype=float)
cmap = color_palette("ch:", as_cmap=True)
s = Continuous(trans="log")._setup(x, Color())
assert_array_equal(s(x), cmap([0, .5, 1])[:, :3]) # FIXME RGBA
def test_tick_locator(self, x):
locs = [.2, .5, .8]
locator = mpl.ticker.FixedLocator(locs)
a = self.setup_ticks(x, locator)
assert_array_equal(a.major.locator(), locs)
def test_tick_locator_input_check(self, x):
err = "Tick locator must be an instance of .*?, not <class 'tuple'>."
with pytest.raises(TypeError, match=err):
Continuous().tick((1, 2))
def test_tick_upto(self, x):
for n in [2, 5, 10]:
a = self.setup_ticks(x, upto=n)
assert len(a.major.locator()) <= (n + 1)
def test_tick_every(self, x):
for d in [.05, .2, .5]:
a = self.setup_ticks(x, every=d)
assert np.allclose(np.diff(a.major.locator()), d)
def test_tick_every_between(self, x):
lo, hi = .2, .8
for d in [.05, .2, .5]:
a = self.setup_ticks(x, every=d, between=(lo, hi))
expected = np.arange(lo, hi + d, d)
assert_array_equal(a.major.locator(), expected)
def test_tick_at(self, x):
locs = [.2, .5, .9]
a = self.setup_ticks(x, at=locs)
assert_array_equal(a.major.locator(), locs)
def test_tick_count(self, x):
n = 8
a = self.setup_ticks(x, count=n)
assert_array_equal(a.major.locator(), np.linspace(0, 1, n))
def test_tick_count_between(self, x):
n = 5
lo, hi = .2, .7
a = self.setup_ticks(x, count=n, between=(lo, hi))
assert_array_equal(a.major.locator(), np.linspace(lo, hi, n))
def test_tick_minor(self, x):
n = 3
a = self.setup_ticks(x, count=2, minor=n)
expected = np.linspace(0, 1, n + 2)
if _version_predates(mpl, "3.8.0rc1"):
# I am not sure why matplotlib <3.8 minor ticks include the
# largest major location but exclude the smalllest one ...
expected = expected[1:]
assert_array_equal(a.minor.locator(), expected)
def test_log_tick_default(self, x):
s = Continuous(trans="log")._setup(x, Coordinate())
a = PseudoAxis(s._matplotlib_scale)
a.set_view_interval(.5, 1050)
ticks = a.major.locator()
assert np.allclose(np.diff(np.log10(ticks)), 1)
def test_log_tick_upto(self, x):
n = 3
s = Continuous(trans="log").tick(upto=n)._setup(x, Coordinate())
a = PseudoAxis(s._matplotlib_scale)
assert a.major.locator.numticks == n
def test_log_tick_count(self, x):
with pytest.raises(RuntimeError, match="`count` requires"):
Continuous(trans="log").tick(count=4)
s = Continuous(trans="log").tick(count=4, between=(1, 1000))
a = PseudoAxis(s._setup(x, Coordinate())._matplotlib_scale)
a.set_view_interval(.5, 1050)
assert_array_equal(a.major.locator(), [1, 10, 100, 1000])
def test_log_tick_format_disabled(self, x):
s = Continuous(trans="log").label(base=None)._setup(x, Coordinate())
a = PseudoAxis(s._matplotlib_scale)
a.set_view_interval(20, 20000)
labels = a.major.formatter.format_ticks(a.major.locator())
for text in labels:
assert re.match(r"^\d+$", text)
def test_log_tick_every(self, x):
with pytest.raises(RuntimeError, match="`every` not supported"):
Continuous(trans="log").tick(every=2)
def test_symlog_tick_default(self, x):
s = Continuous(trans="symlog")._setup(x, Coordinate())
a = PseudoAxis(s._matplotlib_scale)
a.set_view_interval(-1050, 1050)
ticks = a.major.locator()
assert ticks[0] == -ticks[-1]
pos_ticks = np.sort(np.unique(np.abs(ticks)))
assert np.allclose(np.diff(np.log10(pos_ticks[1:])), 1)
assert pos_ticks[0] == 0
def test_label_formatter(self, x):
fmt = mpl.ticker.FormatStrFormatter("%.3f")
a, locs = self.setup_labels(x, fmt)
labels = a.major.formatter.format_ticks(locs)
for text in labels:
assert re.match(r"^\d\.\d{3}$", text)
def test_label_like_pattern(self, x):
a, locs = self.setup_labels(x, like=".4f")
labels = a.major.formatter.format_ticks(locs)
for text in labels:
assert re.match(r"^\d\.\d{4}$", text)
def test_label_like_string(self, x):
a, locs = self.setup_labels(x, like="x = {x:.1f}")
labels = a.major.formatter.format_ticks(locs)
for text in labels:
assert re.match(r"^x = \d\.\d$", text)
def test_label_like_function(self, x):
a, locs = self.setup_labels(x, like="{:^5.1f}".format)
labels = a.major.formatter.format_ticks(locs)
for text in labels:
assert re.match(r"^ \d\.\d $", text)
def test_label_base(self, x):
a, locs = self.setup_labels(100 * x, base=2)
labels = a.major.formatter.format_ticks(locs)
for text in labels[1:]:
assert not text or "2^" in text
def test_label_unit(self, x):
a, locs = self.setup_labels(1000 * x, unit="g")
labels = a.major.formatter.format_ticks(locs)
for text in labels[1:-1]:
assert re.match(r"^\d+ mg$", text)
def test_label_unit_with_sep(self, x):
a, locs = self.setup_labels(1000 * x, unit=("", "g"))
labels = a.major.formatter.format_ticks(locs)
for text in labels[1:-1]:
assert re.match(r"^\d+mg$", text)
def test_label_empty_unit(self, x):
a, locs = self.setup_labels(1000 * x, unit="")
labels = a.major.formatter.format_ticks(locs)
for text in labels[1:-1]:
assert re.match(r"^\d+m$", text)
def test_label_base_from_transform(self, x):
s = Continuous(trans="log")
a = PseudoAxis(s._setup(x, Coordinate())._matplotlib_scale)
a.set_view_interval(10, 1000)
label, = a.major.formatter.format_ticks([100])
assert r"10^{2}" in label
def test_label_type_checks(self):
s = Continuous()
with pytest.raises(TypeError, match="Label formatter must be"):
s.label("{x}")
with pytest.raises(TypeError, match="`like` must be"):
s.label(like=2)
| TestContinuous |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchvision_models.py | {
"start": 12133,
"end": 13035
} | class ____(nn.Module):
__constants__ = ["aux_classifier"]
def __init__(self, backbone, classifier, aux_classifier=None):
super().__init__()
self.backbone = backbone
self.classifier = classifier
self.aux_classifier = aux_classifier
def forward(self, x):
input_shape = x.shape[-2:]
# contract: features is a dict of tensors
features = self.backbone(x)
result = OrderedDict()
x = features["out"]
x = self.classifier(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)
result["out"] = x
if self.aux_classifier is not None:
x = features["aux"]
x = self.aux_classifier(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)
result["aux"] = x
return result
| _SimpleSegmentationModel |
python | ipython__ipython | IPython/lib/backgroundjobs.py | {
"start": 16756,
"end": 17680
} | class ____(BackgroundJobBase):
"""Run a function call as a background job (uses a separate thread)."""
def __init__(self, func, *args, **kwargs):
"""Create a new job from a callable object.
Any positional arguments and keyword args given to this constructor
after the initial callable are passed directly to it."""
if not callable(func):
raise TypeError(
'first argument to BackgroundJobFunc must be callable')
self.func = func
self.args = args
self.kwargs = kwargs
# The string form will only include the function passed, because
# generating string representations of the arguments is a potentially
# _very_ expensive operation (e.g. with large arrays).
self.strform = str(func)
self._init()
def call(self):
return self.func(*self.args, **self.kwargs)
| BackgroundJobFunc |
python | bokeh__bokeh | src/bokeh/protocol/exceptions.py | {
"start": 1670,
"end": 1881
} | class ____(Exception):
''' Indicate an error in processing wire protocol fragments.
This exception indicates that decoded message fragments cannot be properly
assembled.
'''
pass
| ProtocolError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1592924,
"end": 1593086
} | class ____(sgqlc.types.Union):
"""Types that can be pinned to a profile page."""
__schema__ = github_schema
__types__ = (Gist, Repository)
| PinnableItem |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 41465,
"end": 48404
} | class ____(TreeTestCase):
fixtures = ["categories.json", "genres.json", "persons.json"]
def test_all_managers_are_different(self):
# all tree managers should be different. otherwise, possible infinite recursion.
seen = {}
for model in apps.get_models():
if not issubclass(model, MPTTModel):
continue
tm = model._tree_manager
if id(tm) in seen:
self.fail(
f"Tree managers for {model.__name__} and {seen[id(tm)].__name__} are the same manager"
)
seen[id(tm)] = model
def test_manager_multi_table_inheritance(self):
self.assertIs(Student._tree_manager.model, Student)
self.assertIs(Student._tree_manager.tree_model, Person)
self.assertIs(Person._tree_manager.model, Person)
self.assertIs(Person._tree_manager.tree_model, Person)
def test_all_managers_have_correct_model(self):
# all tree managers should have the correct model.
for model in apps.get_models():
if not issubclass(model, MPTTModel):
continue
self.assertEqual(model._tree_manager.model, model)
def test_base_manager_infinite_recursion(self):
# repeatedly calling _base_manager should eventually return None
for model in apps.get_models():
if not issubclass(model, MPTTModel):
continue
manager = model._tree_manager
for _i in range(20):
manager = manager._base_manager
if manager is None:
break
else:
self.fail(
"Detected infinite recursion in %s._tree_manager._base_manager"
% model
)
def test_proxy_custom_manager(self):
self.assertIsInstance(SingleProxyModel._tree_manager, CustomTreeManager)
self.assertIsInstance(SingleProxyModel._tree_manager._base_manager, TreeManager)
self.assertIsInstance(SingleProxyModel.objects, CustomTreeManager)
self.assertIsInstance(SingleProxyModel.objects._base_manager, TreeManager)
def test_get_queryset_descendants(self):
def get_desc_names(qs, include_self=False):
desc = qs.model.objects.get_queryset_descendants(
qs, include_self=include_self
)
return list(desc.values_list("name", flat=True).order_by("name"))
qs = Category.objects.filter(Q(name="Nintendo Wii") | Q(name="PlayStation 3"))
self.assertEqual(
get_desc_names(qs),
["Games", "Games", "Hardware & Accessories", "Hardware & Accessories"],
)
self.assertEqual(
get_desc_names(qs, include_self=True),
[
"Games",
"Games",
"Hardware & Accessories",
"Hardware & Accessories",
"Nintendo Wii",
"PlayStation 3",
],
)
qs = Genre.objects.filter(parent=None)
self.assertEqual(
get_desc_names(qs),
[
"2D Platformer",
"3D Platformer",
"4D Platformer",
"Action RPG",
"Horizontal Scrolling Shootemup",
"Platformer",
"Shootemup",
"Tactical RPG",
"Vertical Scrolling Shootemup",
],
)
self.assertEqual(
get_desc_names(qs, include_self=True),
[
"2D Platformer",
"3D Platformer",
"4D Platformer",
"Action",
"Action RPG",
"Horizontal Scrolling Shootemup",
"Platformer",
"Role-playing Game",
"Shootemup",
"Tactical RPG",
"Vertical Scrolling Shootemup",
],
)
def _get_anc_names(self, qs, include_self=False):
ancestor = qs.model.objects.get_queryset_ancestors(
qs, include_self=include_self
)
return list(ancestor.values_list("name", flat=True).order_by("name"))
def test_get_queryset_ancestors(self):
qs = Category.objects.filter(Q(name="Nintendo Wii") | Q(name="PlayStation 3"))
self.assertEqual(self._get_anc_names(qs), ["PC & Video Games"])
self.assertEqual(
self._get_anc_names(qs, include_self=True),
["Nintendo Wii", "PC & Video Games", "PlayStation 3"],
)
qs = Genre.objects.filter(parent=None)
self.assertEqual(self._get_anc_names(qs), [])
self.assertEqual(
self._get_anc_names(qs, include_self=True), ["Action", "Role-playing Game"]
)
def test_get_queryset_ancestors_regression_379(self):
# https://github.com/django-mptt/django-mptt/issues/379
qs = Genre.objects.all()
self.assertEqual(
self._get_anc_names(qs, include_self=True),
list(Genre.objects.values_list("name", flat=True).order_by("name")),
)
def test_custom_querysets(self):
"""
Test that a custom manager also provides custom querysets.
"""
self.assertTrue(isinstance(Person.objects.all(), CustomTreeQueryset))
self.assertTrue(
isinstance(Person.objects.all()[0].get_children(), CustomTreeQueryset)
)
self.assertTrue(hasattr(Person.objects.none(), "custom_method"))
# Check that empty querysets get custom methods
self.assertTrue(
hasattr(Person.objects.all()[0].get_children().none(), "custom_method")
)
self.assertEqual(type(Person.objects.all()), type(Person.objects.root_nodes()))
def test_manager_from_custom_queryset(self):
"""
Test that a manager created from a custom queryset works.
Regression test for #378.
"""
TreeManager.from_queryset(CustomTreeQueryset)().contribute_to_class(
Genre, "my_manager"
)
self.assertIsInstance(Genre.my_manager.get_queryset(), CustomTreeQueryset)
def test_num_queries_on_get_queryset_descendants(self):
"""
Test the number of queries to access descendants
is not O(n).
At the moment it is O(1)+1.
Ideally we should aim for O(1).
"""
with self.assertNumQueries(2):
qs = Category.objects.get_queryset_descendants(
Category.objects.all(), include_self=True
)
self.assertEqual(len(qs), 10)
def test_default_manager_with_multiple_managers(self):
"""
Test that a model with multiple managers defined always uses the
default manager as the tree manager.
"""
self.assertEqual(type(MultipleManagerModel._tree_manager), TreeManager)
| ManagerTests |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 14960,
"end": 15338
} | class ____(ValidationActionRegistryError):
def __init__(self, action_type: str | None) -> None:
if action_type:
message = f"Invalid action configuration; no action of type {action_type} found."
else:
message = "Invalid action configuration; no 'type' key found."
super().__init__(message)
| ValidationActionRegistryRetrievalError |
python | numpy__numpy | numpy/lib/_arraysetops_impl.py | {
"start": 14944,
"end": 15030
} | class ____(NamedTuple):
values: np.ndarray
counts: np.ndarray
| UniqueCountsResult |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/type/definition.py | {
"start": 11625,
"end": 14224
} | class ____(GraphQLType):
"""Enum Type Definition
Some leaf values of requests and input values are Enums. GraphQL serializes Enum values as strings,
however internally Enums can be represented by any kind of type, often integers.
Example:
RGBType = GraphQLEnumType(
name='RGB',
values=OrderedDict([
('RED', GraphQLEnumValue(0)),
('GREEN', GraphQLEnumValue(1)),
('BLUE', GraphQLEnumValue(2))
])
)
Note: If a value is not provided in a definition, the name of the enum value will be used as it's internal value.
"""
def __init__(self, name, values, description=None):
assert name, 'Type must provide name.'
assert_valid_name(name)
self.name = name
self.description = description
self.values = define_enum_values(self, values)
def serialize(self, value):
if isinstance(value, Hashable):
enum_value = self._value_lookup.get(value)
if enum_value:
return enum_value.name
return None
def parse_value(self, value):
if isinstance(value, Hashable):
enum_value = self._name_lookup.get(value)
if enum_value:
return enum_value.value
return None
def parse_literal(self, value_ast):
if isinstance(value_ast, ast.EnumValue):
enum_value = self._name_lookup.get(value_ast.value)
if enum_value:
return enum_value.value
@cached_property
def _value_lookup(self):
return {value.value: value for value in self.values}
@cached_property
def _name_lookup(self):
return {value.name: value for value in self.values}
def define_enum_values(type, value_map):
assert isinstance(value_map, Mapping) and len(value_map) > 0, (
'{} values must be a mapping (dict / OrderedDict) with value names as keys.'.format(type)
)
values = []
if not isinstance(value_map, (collections.OrderedDict, OrderedDict)):
value_map = OrderedDict(sorted(list(value_map.items())))
for value_name, value in value_map.items():
assert_valid_name(value_name)
assert isinstance(value, GraphQLEnumValue), (
'{}.{} must be an instance of GraphQLEnumValue, but got: {}'.format(type, value_name, value)
)
value = copy.copy(value)
value.name = value_name
if value.value is None:
value.value = value_name
values.append(value)
return values
| GraphQLEnumType |
python | kamyu104__LeetCode-Solutions | Python/remove-element.py | {
"start": 29,
"end": 444
} | class ____(object):
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
i, last = 0, len(A) - 1
while i <= last:
if A[i] == elem:
A[i], A[last] = A[last], A[i]
last -= 1
else:
i += 1
return last + 1
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/combinations_test.py | {
"start": 8056,
"end": 8286
} | class ____(test.TestCase, parameterized.TestCase):
def testSysArgvClearedIsFine(self):
original_argv = list(sys.argv)
sys.argv.clear()
importlib.reload(combinations)
sys.argv = original_argv
| ModuleInitializingTest |
python | django__django | tests/queries/models.py | {
"start": 9386,
"end": 9454
} | class ____(ObjectA):
class Meta:
proxy = True
| ProxyObjectA |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 4441,
"end": 4770
} | class ____(BaseField):
"""Integer field."""
types = (int,)
def parse_value(self, value: Any) -> Optional[int]:
"""Cast value to `int`, e.g. from string or long"""
parsed = super(IntField, self).parse_value(value)
if parsed is None:
return parsed
return int(parsed)
| IntField |
python | encode__django-rest-framework | tests/schemas/test_managementcommand.py | {
"start": 545,
"end": 843
} | class ____:
SCHEMA = {"key": "value"}
def __init__(self, *args, **kwargs):
pass
def get_schema(self, **kwargs):
return self.SCHEMA
@override_settings(ROOT_URLCONF=__name__)
@pytest.mark.skipif(not uritemplate, reason='uritemplate is not installed')
| CustomSchemaGenerator |
python | doocs__leetcode | solution/3600-3699/3645.Maximum Total from Optimal Activation Order/Solution.py | {
"start": 0,
"end": 308
} | class ____:
def maxTotal(self, value: List[int], limit: List[int]) -> int:
g = defaultdict(list)
for v, lim in zip(value, limit):
g[lim].append(v)
ans = 0
for lim, vs in g.items():
vs.sort()
ans += sum(vs[-lim:])
return ans
| Solution |
python | eventlet__eventlet | tests/convenience_test.py | {
"start": 308,
"end": 6303
} | class ____(tests.LimitedTestCase):
def setUp(self):
super().setUp()
debug.hub_exceptions(False)
def tearDown(self):
super().tearDown()
debug.hub_exceptions(True)
def test_exiting_server(self):
# tests that the server closes the client sock on handle() exit
def closer(sock, addr):
pass
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, closer)
client = eventlet.connect(('localhost', l.getsockname()[1]))
client.sendall(b'a')
self.assertFalse(client.recv(100))
gt.kill()
def test_excepting_server(self):
# tests that the server closes the client sock on handle() exception
def crasher(sock, addr):
sock.recv(1024)
0 // 0
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, crasher)
client = eventlet.connect(('localhost', l.getsockname()[1]))
client.sendall(b'a')
self.assertRaises(ZeroDivisionError, gt.wait)
self.assertFalse(client.recv(100))
def test_excepting_server_already_closed(self):
# same as above but with explicit clsoe before crash
def crasher(sock, addr):
sock.recv(1024)
sock.close()
0 // 0
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, crasher)
client = eventlet.connect(('localhost', l.getsockname()[1]))
client.sendall(b'a')
self.assertRaises(ZeroDivisionError, gt.wait)
self.assertFalse(client.recv(100))
def test_called_for_each_connection(self):
hits = [0]
def counter(sock, addr):
hits[0] += 1
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, counter)
for i in range(100):
client = eventlet.connect(('localhost', l.getsockname()[1]))
self.assertFalse(client.recv(100))
gt.kill()
self.assertEqual(100, hits[0])
def test_blocking(self):
l = eventlet.listen(('localhost', 0))
x = eventlet.with_timeout(
0.01,
eventlet.serve, l, lambda c, a: None,
timeout_value="timeout")
self.assertEqual(x, "timeout")
def test_raising_stopserve(self):
def stopit(conn, addr):
raise eventlet.StopServe()
l = eventlet.listen(('localhost', 0))
# connect to trigger a call to stopit
gt = eventlet.spawn(eventlet.connect, ('localhost', l.getsockname()[1]))
eventlet.serve(l, stopit)
gt.wait()
def test_concurrency(self):
evt = eventlet.Event()
def waiter(sock, addr):
sock.sendall(b'hi')
evt.wait()
l = eventlet.listen(('localhost', 0))
eventlet.spawn(eventlet.serve, l, waiter, 5)
def test_client():
c = eventlet.connect(('localhost', l.getsockname()[1]))
# verify the client is connected by getting data
self.assertEqual(b'hi', c.recv(2))
return c
[test_client() for i in range(5)]
# very next client should not get anything
x = eventlet.with_timeout(
0.01,
test_client,
timeout_value="timed out")
self.assertEqual(x, "timed out")
@tests.skip_if_no_ssl
def test_wrap_ssl(self):
server = eventlet.wrap_ssl(
eventlet.listen(('localhost', 0)),
certfile=certificate_file, keyfile=private_key_file,
server_side=True)
port = server.getsockname()[1]
def handle(sock, addr):
sock.sendall(sock.recv(1024))
raise eventlet.StopServe()
eventlet.spawn(eventlet.serve, server, handle)
client = eventlet.wrap_ssl(eventlet.connect(('localhost', port)))
client.sendall(b"echo")
self.assertEqual(b"echo", client.recv(1024))
def test_socket_reuse():
# pick a free port with bind to 0 - without SO_REUSEPORT
# then close it and try to bind to same port with SO_REUSEPORT
# loop helps in case something else used the chosen port before second bind
addr = None
errors = []
for _ in range(5):
lsock1 = eventlet.listen(('localhost', 0))
addr = lsock1.getsockname()
lsock1.close()
try:
lsock1 = eventlet.listen(addr)
except OSError as e:
errors.append(e)
continue
break
else:
assert False, errors
if hasattr(socket, 'SO_REUSEPORT'):
lsock2 = eventlet.listen(addr)
else:
try:
lsock2 = eventlet.listen(addr)
assert lsock2
lsock2.close()
except OSError:
pass
lsock1.close()
def test_reuse_random_port_warning():
with warnings.catch_warnings(record=True) as w:
eventlet.listen(('localhost', 0), reuse_port=True).close()
assert len(w) == 1
assert issubclass(w[0].category, convenience.ReuseRandomPortWarning)
@tests.skip_unless(hasattr(socket, 'SO_REUSEPORT'))
def test_reuseport_oserror():
# https://github.com/eventlet/eventlet/issues/380
# https://github.com/eventlet/eventlet/issues/418
err22 = OSError(22, 'Invalid argument')
sock1 = eventlet.listen(('localhost', 0))
addr = sock1.getsockname()
sock1.close()
original_socket_init = socket.socket.__init__
def patched(self, *a, **kw):
original_socket_init(self, *a, **kw)
self.setsockopt = tests.mock.Mock(side_effect=err22)
with warnings.catch_warnings(record=True) as w:
try:
socket.socket.__init__ = patched
eventlet.listen(addr, reuse_addr=False, reuse_port=True).close()
finally:
socket.socket.__init__ = original_socket_init
assert len(w) == 1
assert issubclass(w[0].category, convenience.ReusePortUnavailableWarning)
| TestServe |
python | astropy__astropy | astropy/io/ascii/html.py | {
"start": 3149,
"end": 4199
} | class ____(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
header_elements = soup.find_all("th")
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [
(el.text.strip(), el["colspan"])
if el.has_attr("colspan")
else el.text.strip()
for el in header_elements
]
data_elements = soup.find_all("td")
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError(
"HTML tables must contain data in a <table> tag"
)
| HTMLSplitter |
python | keon__algorithms | tests/test_dp.py | {
"start": 3865,
"end": 4310
} | class ____(unittest.TestCase):
def test_get_maximum_value(self):
item1, item2, item3 = Item(60, 10), Item(100, 20), Item(120, 30)
self.assertEqual(220, get_maximum_value([item1, item2, item3], 50))
item1, item2, item3, item4 = Item(60, 5), Item(50, 3), Item(70, 4), Item(30, 2)
self.assertEqual(80, get_maximum_value([item1, item2, item3, item4],
5))
| TestKnapsack |
python | django__django | tests/migrations/test_migrations_conflict_long_name/0001_initial.py | {
"start": 43,
"end": 281
} | class ____(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
],
),
]
| Migration |
python | getsentry__sentry | src/sentry/middleware/stats.py | {
"start": 369,
"end": 817
} | class ____(MiddlewareMixin):
def process_response(self, request: Request, response: Response) -> Response:
metrics.incr("response", instance=str(response.status_code), skip_internal=False)
return response
def process_exception(self, request: Request, exception: Exception) -> None:
if not isinstance(exception, Http404):
metrics.incr("response", instance="500", skip_internal=False)
| ResponseCodeMiddleware |
python | numba__numba | numba/tests/test_iteration.py | {
"start": 6612,
"end": 7049
} | class ____(MemoryLeakMixin, TestCase):
def test_zip_with_arrays(self):
@njit
def foo(sequence):
c = 0
for a, b in zip(range(len(sequence)), sequence):
c += (a + 1) * b.sum()
return
sequence = [np.arange(1 + i) for i in range(10)]
self.assertEqual(foo(sequence), foo.py_func(sequence))
if __name__ == '__main__':
unittest.main()
| TestIterationRefct |
python | kamyu104__LeetCode-Solutions | Python/sort-even-and-odd-indices-independently.py | {
"start": 2720,
"end": 2962
} | class ____(object):
def sortEvenOdd(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
nums[::2], nums[1::2] = sorted(nums[::2]), sorted(nums[1::2], reverse=True)
return nums
| Solution3 |
python | langchain-ai__langchain | libs/cli/langchain_cli/namespaces/migrate/generate/utils.py | {
"start": 418,
"end": 7001
} | class ____(ast.NodeVisitor):
"""Import extractor."""
def __init__(self, *, from_package: str | None = None) -> None:
"""Extract all imports from the given code, optionally filtering by package."""
self.imports: list[tuple[str, str]] = []
self.package = from_package
@override
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if node.module and (
self.package is None or str(node.module).startswith(self.package)
):
for alias in node.names:
self.imports.append((node.module, alias.name))
self.generic_visit(node)
def _get_class_names(code: str) -> list[str]:
"""Extract class names from a code string."""
# Parse the content of the file into an AST
tree = ast.parse(code)
# Initialize a list to hold all class names
class_names = []
# Define a node visitor class to collect class names
class ClassVisitor(ast.NodeVisitor):
@override
def visit_ClassDef(self, node: ast.ClassDef) -> None:
class_names.append(node.name)
self.generic_visit(node)
# Create an instance of the visitor and visit the AST
visitor = ClassVisitor()
visitor.visit(tree)
return class_names
def is_subclass(class_obj: type, classes_: list[type]) -> bool:
"""Check if the given class object is a subclass of any class in list classes.
Args:
class_obj: The class to check.
classes_: A list of classes to check against.
Returns:
True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
"""
return any(
issubclass(class_obj, kls)
for kls in classes_
if inspect.isclass(class_obj) and inspect.isclass(kls)
)
def find_subclasses_in_module(module: ModuleType, classes_: list[type]) -> list[str]:
"""Find all classes in the module that inherit from one of the classes.
Args:
module: The module to inspect.
classes_: A list of classes to check against.
Returns:
A list of class names that are subclasses of any class in `classes_`.
"""
subclasses = []
# Iterate over all attributes of the module that are classes
for _name, obj in inspect.getmembers(module, inspect.isclass):
if is_subclass(obj, classes_):
subclasses.append(obj.__name__)
return subclasses
def _get_all_classnames_from_file(file: Path, pkg: str) -> list[tuple[str, str]]:
"""Extract all class names from a file."""
code = Path(file).read_text(encoding="utf-8")
module_name = _get_current_module(file, pkg)
class_names = _get_class_names(code)
return [(module_name, class_name) for class_name in class_names]
def identify_all_imports_in_file(
file: str,
*,
from_package: str | None = None,
) -> list[tuple[str, str]]:
"""Identify all the imports in the given file.
Args:
file: The file to analyze.
from_package: If provided, only return imports from this package.
Returns:
A list of tuples `(module, name)` representing the imports found in the file.
"""
code = Path(file).read_text(encoding="utf-8")
return find_imports_from_package(code, from_package=from_package)
def identify_pkg_source(pkg_root: str) -> pathlib.Path:
"""Identify the source of the package.
Args:
pkg_root: the root of the package. This contains source + tests, and other
things like pyproject.toml, lock files etc
Returns:
Returns the path to the source code for the package.
Raises:
ValueError: If there is not exactly one directory starting with `'langchain_'`
in the package root.
"""
dirs = [d for d in Path(pkg_root).iterdir() if d.is_dir()]
matching_dirs = [d for d in dirs if d.name.startswith("langchain_")]
if len(matching_dirs) != 1:
msg = "There should be only one langchain package."
raise ValueError(msg)
return matching_dirs[0]
def list_classes_by_package(pkg_root: str) -> list[tuple[str, str]]:
"""List all classes in a package.
Args:
pkg_root: the root of the package.
Returns:
A list of tuples `(module, class_name)` representing all classes found in the
package, excluding test files.
"""
module_classes = []
pkg_source = identify_pkg_source(pkg_root)
files = list(pkg_source.rglob("*.py"))
for file in files:
rel_path = os.path.relpath(file, pkg_root)
if rel_path.startswith("tests"):
continue
module_classes.extend(_get_all_classnames_from_file(file, pkg_root))
return module_classes
def list_init_imports_by_package(pkg_root: str) -> list[tuple[str, str]]:
"""List all the things that are being imported in a package by module.
Args:
pkg_root: the root of the package.
Returns:
A list of tuples `(module, name)` representing the imports found in
`__init__.py` files.
"""
imports = []
pkg_source = identify_pkg_source(pkg_root)
# Scan all the files in the package
files = list(Path(pkg_source).rglob("*.py"))
for file in files:
if file.name != "__init__.py":
continue
import_in_file = identify_all_imports_in_file(str(file))
module_name = _get_current_module(file, pkg_root)
imports.extend([(module_name, item) for _, item in import_in_file])
return imports
def find_imports_from_package(
code: str,
*,
from_package: str | None = None,
) -> list[tuple[str, str]]:
"""Find imports in code.
Args:
code: The code to analyze.
from_package: If provided, only return imports from this package.
Returns:
A list of tuples `(module, name)` representing the imports found.
"""
# Parse the code into an AST
tree = ast.parse(code)
# Create an instance of the visitor
extractor = ImportExtractor(from_package=from_package)
# Use the visitor to update the imports list
extractor.visit(tree)
return extractor.imports
def _get_current_module(path: Path, pkg_root: str) -> str:
"""Convert a path to a module name."""
relative_path = path.relative_to(pkg_root).with_suffix("")
posix_path = relative_path.as_posix()
norm_path = os.path.normpath(str(posix_path))
fully_qualified_module = norm_path.replace("/", ".")
# Strip __init__ if present
if fully_qualified_module.endswith(".__init__"):
return fully_qualified_module[:-9]
return fully_qualified_module
| ImportExtractor |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_A.py | {
"start": 6771,
"end": 7933
} | class ____(Benchmark):
r"""
AMGM objective function.
The AMGM (Arithmetic Mean - Geometric Mean Equality) global optimization
problem is a multimodal minimization problem defined as follows
.. math::
f_{\text{AMGM}}(x) = \left ( \frac{1}{n} \sum_{i=1}^{n} x_i -
\sqrt[n]{ \prod_{i=1}^{n} x_i} \right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_1 = x_2 = ... = x_n` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO, retrieved 2015
TODO: eqn 7 in [1]_ has the wrong global minimum value.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))
self.global_optimum = [[1, 1]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
f1 = sum(x)
f2 = prod(x)
f1 = f1 / self.N
f2 = f2 ** (1.0 / self.N)
f = (f1 - f2) ** 2
return f
| AMGM |
python | numpy__numpy | numpy/lib/tests/test_histograms.py | {
"start": 25521,
"end": 33951
} | class ____:
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
[.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
H, edges = histogramdd(x, (2, 3, 3),
range=[[-1, 1], [0, 3], [0, 3]])
answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
[[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
assert_array_equal(H, answer)
# Check normalization
ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
H, edges = histogramdd(x, bins=ed, density=True)
assert_(np.all(H == answer / 12.))
# Check that H has the correct shape.
H, edges = histogramdd(x, (2, 3, 4),
range=[[-1, 1], [0, 3], [0, 4]],
density=True)
answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
assert_array_almost_equal(H, answer / 6., 4)
# Check that a sequence of arrays is accepted and H has the correct
# shape.
z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
H, edges = histogramdd(
z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
answer = np.array([[[0, 0], [0, 0], [0, 0]],
[[0, 1], [0, 0], [1, 0]],
[[0, 1], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]]])
assert_array_equal(H, answer)
Z = np.zeros((5, 5, 5))
Z[list(range(5)), list(range(5)), list(range(5))] = 1.
H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
assert_array_equal(H, Z)
def test_shape_3d(self):
# All possible permutations for bins of different lengths in 3D.
bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
(4, 5, 6))
r = np.random.rand(10, 3)
for b in bins:
H, edges = histogramdd(r, b)
assert_(H.shape == b)
def test_shape_4d(self):
# All possible permutations for bins of different lengths in 4D.
bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
(5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
(7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
(4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
(6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
(5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
r = np.random.rand(10, 4)
for b in bins:
H, edges = histogramdd(r, b)
assert_(H.shape == b)
def test_weights(self):
v = np.random.rand(100, 2)
hist, edges = histogramdd(v)
n_hist, edges = histogramdd(v, density=True)
w_hist, edges = histogramdd(v, weights=np.ones(100))
assert_array_equal(w_hist, hist)
w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
assert_array_equal(w_hist, n_hist)
w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
assert_array_equal(w_hist, 2 * hist)
def test_identical_samples(self):
x = np.zeros((10, 2), int)
hist, edges = histogramdd(x, bins=2)
assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
def test_empty(self):
a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, np.array([[0.]]))
a, b = np.histogramdd([[], [], []], bins=2)
assert_array_max_ulp(a, np.zeros((2, 2, 2)))
def test_bins_errors(self):
# There are two ways to specify bins. Check for the right errors
# when mixing those.
x = np.arange(8).reshape(2, 4)
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
def test_inf_edges(self):
# Test using +/-inf bin edges works. See #1788.
with np.errstate(invalid='ignore'):
x = np.arange(6).reshape(3, 2)
expected = np.array([[1, 0], [0, 1], [0, 1]])
h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
assert_allclose(h, expected)
def test_rightmost_binedge(self):
# Test event very close to rightmost binedge. See Github issue #4266
x = [0.9999999995]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0000000001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 0.0)
def test_finite_range(self):
vals = np.random.random((100, 3))
histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
def test_equal_edges(self):
""" Test that adjacent entries in an edge array can be equal """
x = np.array([0, 1, 2])
y = np.array([0, 1, 2])
x_edges = np.array([0, 2, 2])
y_edges = 1
hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
hist_expected = np.array([
[2.],
[1.], # x == 2 falls in the final bin
])
assert_equal(hist, hist_expected)
def test_edge_dtype(self):
""" Test that if an edge array is input, its type is preserved """
x = np.array([0, 10, 20])
y = x / 10
x_edges = np.array([0, 5, 15, 20])
y_edges = x_edges / 10
hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
assert_equal(edges[0].dtype, x_edges.dtype)
assert_equal(edges[1].dtype, y_edges.dtype)
def test_large_integers(self):
big = 2**60 # Too large to represent with a full precision float
x = np.array([0], np.int64)
x_edges = np.array([-1, +1], np.int64)
y = big + x
y_edges = big + x_edges
hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
assert_equal(hist[0, 0], 1)
def test_density_non_uniform_2d(self):
# Defines the following grid:
#
# 0 2 8
# 0+-+-----+
# + | +
# + | +
# 6+-+-----+
# 8+-+-----+
x_edges = np.array([0, 2, 8])
y_edges = np.array([0, 6, 8])
relative_areas = np.array([
[3, 9],
[1, 3]])
# ensure the number of points in each region is proportional to its area
x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9)
y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9)
# sanity check that the above worked as intended
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
assert_equal(hist, relative_areas)
# resulting histogram should be uniform, since counts and areas are proportional
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
assert_equal(hist, 1 / (8 * 8))
def test_density_non_uniform_1d(self):
# compare to histogram to show the results are the same
v = np.arange(10)
bins = np.array([0, 1, 3, 6, 10])
hist, edges = histogram(v, bins, density=True)
hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
assert_equal(hist, hist_dd)
assert_equal(edges, edges_dd[0])
| TestHistogramdd |
python | huggingface__transformers | src/transformers/models/cwm/modular_cwm.py | {
"start": 9523,
"end": 9589
} | class ____(BaseModelOutputWithPast):
pass
| CwmModelOutputWithPast |
python | pytorch__pytorch | test/test_tensorboard.py | {
"start": 7469,
"end": 10243
} | class ____(BaseTestCase):
def test_to_HWC(self):
test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, "chw")
self.assertEqual(converted.shape, (32, 32, 3))
test_image = np.random.randint(0, 256, size=(16, 3, 32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, "nchw")
self.assertEqual(converted.shape, (64, 256, 3))
test_image = np.random.randint(0, 256, size=(32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, "hw")
self.assertEqual(converted.shape, (32, 32, 3))
def test_convert_to_HWC_dtype_remains_same(self):
# test to ensure convert_to_HWC restores the dtype of input np array and
# thus the scale_factor calculated for the image is 1
test_image = torch.tensor([[[[1, 2, 3], [4, 5, 6]]]], dtype=torch.uint8)
tensor = make_np(test_image)
tensor = convert_to_HWC(tensor, "NCHW")
scale_factor = summary._calc_scale_factor(tensor)
self.assertEqual(
scale_factor,
1,
msg="Values are already in [0, 255], scale factor should be 1",
)
def test_prepare_video(self):
# At each timeframe, the sum over all other
# dimensions of the video should be the same.
shapes = [
(16, 30, 3, 28, 28),
(36, 30, 3, 28, 28),
(19, 29, 3, 23, 19),
(3, 3, 3, 3, 3),
]
for s in shapes:
V_input = np.random.random(s)
V_after = _prepare_video(np.copy(V_input))
total_frame = s[1]
V_input = np.swapaxes(V_input, 0, 1)
for f in range(total_frame):
x = np.reshape(V_input[f], newshape=(-1))
y = np.reshape(V_after[f], newshape=(-1))
np.testing.assert_array_almost_equal(np.sum(x), np.sum(y))
def test_numpy_vid_uint8(self):
V_input = np.random.randint(0, 256, (16, 30, 3, 28, 28)).astype(np.uint8)
V_after = _prepare_video(np.copy(V_input)) * 255
total_frame = V_input.shape[1]
V_input = np.swapaxes(V_input, 0, 1)
for f in range(total_frame):
x = np.reshape(V_input[f], newshape=(-1))
y = np.reshape(V_after[f], newshape=(-1))
np.testing.assert_array_almost_equal(np.sum(x), np.sum(y))
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
| TestTensorBoardUtils |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/prefetch_with_slack_test.py | {
"start": 1157,
"end": 4140
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(PrefetchWithSlackTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(test_base.default_test_combinations())
def testPrefetchWithSlackOption(self):
"""Determines slack_period based on num devices attached to iterator."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
options = options_lib.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testPrefetchWithSlackOptionWithoutIterator(self):
"""Defaults to slack period of 1 without iterator."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
options = options_lib.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(10))
@combinations.generate(test_base.default_test_combinations())
def testWithPassthroughDataset(self):
"""Should still work with a passthrough dataset after prefetch()."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
dataset = dataset.map(lambda x: x + 1)
options = options_lib.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(1, 11))
@combinations.generate(test_base.default_test_combinations())
def testNoErrorWithoutPrefetch(self):
"""The rewrite should not fail if there is no prefetch() in the pipeline."""
dataset = dataset_ops.Dataset.range(10)
options = options_lib.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(10))
@combinations.generate(test_base.default_test_combinations())
def testNoErrorWithInvalidDataset(self):
"""With a nested dataset op after prefetch, the rewrite should fail."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensors)
options = options_lib.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(10))
if __name__ == "__main__":
test.main()
| PrefetchWithSlackTest |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/exact_match/base.py | {
"start": 144,
"end": 3058
} | class ____(StringEvaluator):
"""Compute an exact match between the prediction and the reference.
Examples:
----------
>>> evaluator = ExactMatchChain()
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="Mindy is the CTO",
) # This will return {'score': 1.0}
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="Mindy is the CEO",
) # This will return {'score': 0.0}
"""
def __init__(
self,
*,
ignore_case: bool = False,
ignore_punctuation: bool = False,
ignore_numbers: bool = False,
**_: Any,
):
"""Initialize the `ExactMatchStringEvaluator`.
Args:
ignore_case: Whether to ignore case when comparing strings.
ignore_punctuation: Whether to ignore punctuation when comparing strings.
ignore_numbers: Whether to ignore numbers when comparing strings.
"""
super().__init__()
self.ignore_case = ignore_case
self.ignore_punctuation = ignore_punctuation
self.ignore_numbers = ignore_numbers
@property
def requires_input(self) -> bool:
"""This evaluator does not require input."""
return False
@property
def requires_reference(self) -> bool:
"""This evaluator requires a reference."""
return True
@property
def input_keys(self) -> list[str]:
"""Get the input keys.
Returns:
The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""Get the evaluation name.
Returns:
The evaluation name.
"""
return "exact_match"
@override
def _evaluate_strings( # type: ignore[override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""Evaluate the exact match between the prediction and the reference.
Args:
prediction: The prediction string.
reference: The reference string.
**kwargs: Additional keyword arguments (not used).
Returns:
The evaluation results containing the score.
"""
if self.ignore_case:
prediction = prediction.lower()
reference = reference.lower()
if self.ignore_punctuation:
prediction = prediction.translate(str.maketrans("", "", string.punctuation))
reference = reference.translate(str.maketrans("", "", string.punctuation))
if self.ignore_numbers:
prediction = prediction.translate(str.maketrans("", "", string.digits))
reference = reference.translate(str.maketrans("", "", string.digits))
return {"score": int(prediction == reference)}
| ExactMatchStringEvaluator |
python | tornadoweb__tornado | demos/s3server/s3server.py | {
"start": 1999,
"end": 2800
} | class ____(web.Application):
"""Implementation of an S3-like storage server based on local files.
If bucket depth is given, we break files up into multiple directories
to prevent hitting file system limits for number of files in each
directories. 1 means one level of directories, 2 means 2, etc.
"""
def __init__(self, root_directory, bucket_depth=0):
web.Application.__init__(
self,
[
(r"/", RootHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
],
)
self.directory = os.path.abspath(root_directory)
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.bucket_depth = bucket_depth
| S3Application |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modeling_glm4v_moe.py | {
"start": 24085,
"end": 24822
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Glm4vMoeTextRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| Glm4vMoeTextRMSNorm |
python | networkx__networkx | networkx/algorithms/tests/test_cuts.py | {
"start": 4850,
"end": 5376
} | class ____:
"""Unit tests for the :func:`~networkx.mixing_expansion` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
S = set(range(5))
T = set(G) - S
expansion = nx.mixing_expansion(G, S, T)
# There is one cut edge, and the total number of edges in the
# graph is twice the total number of edges in a clique of size
# five, plus one more for the bridge.
expected = 1 / (2 * (5 * 4 + 1))
assert expected == expansion
| TestMixingExpansion |
python | doocs__leetcode | solution/2600-2699/2678.Number of Senior Citizens/Solution.py | {
"start": 0,
"end": 127
} | class ____:
def countSeniors(self, details: List[str]) -> int:
return sum(int(x[11:13]) > 60 for x in details)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 830187,
"end": 830597
} | class ____(sgqlc.types.Type):
"""Represents a object that contains package activity statistics such
as downloads.
"""
__schema__ = github_schema
__field_names__ = ("downloads_total_count",)
downloads_total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="downloadsTotalCount")
"""Number of times the package was downloaded since it was created."""
| PackageStatistics |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/core/dbt_cli_invocation.py | {
"start": 1469,
"end": 1655
} | class ____(NamedTuple):
"""Hashable representation of the information needed to identify a relation in a database."""
database: str
schema: str
identifier: str
| RelationKey |
python | numba__numba | numba/tests/test_dispatcher.py | {
"start": 30217,
"end": 32504
} | class ____(TestCase):
def test_pass_dispatcher_as_arg(self):
# Test that a Dispatcher object can be pass as argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_can_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
self.assertEqual(foo(fn), fn)
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
| TestDispatcherFunctionBoundaries |
python | pyca__cryptography | src/cryptography/hazmat/primitives/ciphers/base.py | {
"start": 1411,
"end": 1632
} | class ____(CipherContext, metaclass=abc.ABCMeta):
@abc.abstractmethod
def authenticate_additional_data(self, data: Buffer) -> None:
"""
Authenticates the provided bytes.
"""
| AEADCipherContext |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/vt100_parser.py | {
"start": 984,
"end": 1077
} | class ____:
"""Helper object to indicate flush operation to the parser."""
pass
| _Flush |
python | skorch-dev__skorch | skorch/callbacks/logging.py | {
"start": 28855,
"end": 34232
} | class ____(Callback):
"""Logs results from history to Sacred.
Sacred is a tool to help you configure, organize, log and reproduce
experiments. Developed at IDSIA. See https://github.com/IDSIA/sacred.
Use this callback to automatically log all interesting values from
your net's history to Sacred.
If you want to log additional information, you can simply add it to
``History``. See the documentation on ``Callbacks``, and ``Scoring`` for
more information. Alternatively you can subclass this callback and extend
the ``on_*`` methods.
To use this logger, you first have to install Sacred:
.. code-block:: bash
python -m pip install sacred
You might also install pymongo to use a mongodb backend. See the `upstream
documentation <https://github.com/IDSIA/sacred#installing>`_ for more
details. Once you have installed it, you can set up a simple experiment and
pass this Logger as a callback to your skorch estimator:
Examples
--------
>>> # contents of sacred-experiment.py
>>> import numpy as np
>>> from sacred import Experiment
>>> from sklearn.datasets import make_classification
>>> from skorch.callbacks.logging import SacredLogger
>>> from skorch.callbacks.scoring import EpochScoring
>>> from skorch import NeuralNetClassifier
>>> from skorch.toy import make_classifier
>>> ex = Experiment()
>>> @ex.config
>>> def my_config():
... max_epochs = 20
... lr = 0.01
>>> X, y = make_classification()
>>> X, y = X.astype(np.float32), y.astype(np.int64)
>>> @ex.automain
>>> def main(_run, max_epochs, lr):
... # Take care to add additional scoring callbacks *before* the logger.
... net = NeuralNetClassifier(
... make_classifier(),
... max_epochs=max_epochs,
... lr=0.01,
... callbacks=[EpochScoring("f1"), SacredLogger(_run)]
... )
... # now fit your estimator to your data
... net.fit(X, y)
Then call this from the command line, e.g. like this:
.. code-block:: bash
python sacred-script.py with max_epochs=15
You can also change other options on the command line and optionally
specify a backend.
Parameters
----------
experiment : sacred.Experiment
Instantiated ``Experiment`` class.
log_on_batch_end : bool (default=False)
Whether to log loss and other metrics on batch level.
log_on_epoch_end : bool (default=True)
Whether to log loss and other metrics on epoch level.
batch_suffix : str (default=None)
A string that will be appended to all logged keys. By default (if set to
``None``) "_batch" is used if batch and epoch logging are both enabled
and no suffix is used otherwise.
epoch_suffix : str (default=None)
A string that will be appended to all logged keys. By default (if set to
``None``) "_epoch" is used if batch and epoch logging are both enabled
and no suffix is used otherwise.
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be logged to Sacred. Note that in
addition to the keys provided by the user, keys such as those starting
with ``'event_'`` or ending on ``'_best'`` are ignored by default.
"""
def __init__(
self,
experiment,
log_on_batch_end=False,
log_on_epoch_end=True,
batch_suffix=None,
epoch_suffix=None,
keys_ignored=None,
):
self.experiment = experiment
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
self.batch_suffix = batch_suffix
self.epoch_suffix = epoch_suffix
self.keys_ignored = keys_ignored
def initialize(self):
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add("batches")
self.batch_suffix_ = self.batch_suffix
self.epoch_suffix_ = self.epoch_suffix
if self.batch_suffix_ is None:
self.batch_suffix_ = (
"_batch" if self.log_on_batch_end and self.log_on_epoch_end else ""
)
if self.epoch_suffix_ is None:
self.epoch_suffix_ = (
"_epoch" if self.log_on_batch_end and self.log_on_epoch_end else ""
)
return self
def on_batch_end(self, net, **kwargs):
if not self.log_on_batch_end:
return
batch_logs = net.history[-1]["batches"][-1]
for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_):
# skorch does not keep a batch count, but sacred will
# automatically associate the results with a counter.
self.experiment.log_scalar(key + self.batch_suffix_, batch_logs[key])
def on_epoch_end(self, net, **kwargs):
"""Automatically log values from the last history step."""
if not self.log_on_epoch_end:
return
epoch_logs = net.history[-1]
epoch = epoch_logs["epoch"]
for key in filter_log_keys(epoch_logs.keys(), self.keys_ignored_):
self.experiment.log_scalar(key + self.epoch_suffix_, epoch_logs[key], epoch)
| SacredLogger |
python | doocs__leetcode | solution/0200-0299/0222.Count Complete Tree Nodes/Solution2.py | {
"start": 192,
"end": 660
} | class ____:
def countNodes(self, root: Optional[TreeNode]) -> int:
def depth(root):
d = 0
while root:
d += 1
root = root.left
return d
if root is None:
return 0
left, right = depth(root.left), depth(root.right)
if left == right:
return (1 << left) + self.countNodes(root.right)
return (1 << right) + self.countNodes(root.left)
| Solution |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 105406,
"end": 106918
} | class ____(torch.nn.Module):
def forward(
self,
primals_1: "Sym(s16)", # PlainAOTInput(idx=0)
primals_2: "f32[3, s16]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=1), attr='a')
primals_3: "f32[3, s16]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=1), attr='b')
primals_4: "Sym(s16)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=1), idx=1)
primals_5: "Sym(s16)", # SubclassStrideAOTInput(base=PlainAOTInput(idx=1), idx=0)
):
clone: "f32[3, s16]" = torch.ops.aten.clone.default(primals_2); primals_2 = None
clone_1: "f32[3, s16]" = torch.ops.aten.clone.default(primals_3); primals_3 = None
view: "f32[3*s16]" = torch.ops.aten.view.default(clone, [-1])
sym_size_int_2: "Sym(3*s16)" = torch.ops.aten.sym_size.int(view, 0)
view_1: "f32[3*s16]" = torch.ops.aten.view.default(clone_1, [-1])
return (
clone, # PlainAOTOutput(idx=0)
view, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=1), attr='a')
view_1, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=1), attr='b')
sym_size_int_2, # SubclassSizeAOTOutput(base=PlainAOTOutput(idx=1), idx=0)
clone_1, # PlainAOTOutput(idx=2)
primals_5, # SavedForBackwardsAOTOutput(idx=0)
)
""", # noqa: B950
)
self.assertExpectedInline(
normalize_gm(bw[0].print_readable(print_output=False, expanded_def=True)),
"""\
| GraphModule |
python | Textualize__textual | tests/input/test_cut_copy_paste.py | {
"start": 79,
"end": 1714
} | class ____(App):
def compose(self) -> ComposeResult:
yield Input()
async def test_cut():
"""Check that cut removes text and places it in the clipboard."""
app = InputApp()
async with app.run_test() as pilot:
input = app.query_one(Input)
await pilot.click(input)
await pilot.press(*"Hello, World")
await pilot.press("left", "shift+left", "shift+left")
await pilot.press("ctrl+x")
assert input.value == "Hello, Wod"
assert app.clipboard == "rl"
async def test_copy():
"""Check that copy places text in the clipboard."""
app = InputApp()
async with app.run_test() as pilot:
input = app.query_one(Input)
await pilot.click(input)
await pilot.press(*"Hello, World")
await pilot.press("left", "shift+left", "shift+left")
await pilot.press("ctrl+c")
assert input.value == "Hello, World"
assert app.clipboard == "rl"
async def test_paste():
"""Check that paste copies text from the local clipboard."""
app = InputApp()
async with app.run_test() as pilot:
input = app.query_one(Input)
await pilot.click(input)
await pilot.press(*"Hello, World")
await pilot.press(
"shift+left", "shift+left", "shift+left", "shift+left", "shift+left"
)
await pilot.press("ctrl+c")
assert input.value == "Hello, World"
assert app.clipboard == "World"
await pilot.press("ctrl+v")
assert input.value == "Hello, World"
await pilot.press("ctrl+v")
assert input.value == "Hello, WorldWorld"
| InputApp |
python | huggingface__transformers | tests/models/gpt_sw3/test_tokenization_gpt_sw3.py | {
"start": 950,
"end": 8047
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "AI-Sweden-Models/gpt-sw3-126m"
tokenizer_class = GPTSw3Tokenizer
test_rust_tokenizer = False
test_sentencepiece = True
test_sentencepiece_ignore_case = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# We have a SentencePiece fixture for testing
tokenizer = GPTSw3Tokenizer(
SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>", name_or_path="test"
)
tokenizer.save_pretrained(cls.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_text = "This is a test"
output_text = "This is a test"
return input_text, output_text
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "j")
self.assertEqual(len(vocab_keys), 2_000)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
def test_full_tokenizer(self):
tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, name_or_path="test")
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
# fmt: off
self.assertListEqual(
tokens,
["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
)
# fmt: on
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
# fmt: off
self.assertListEqual(
back_tokens,
["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
)
# fmt: on
def test_fast_encode_decode(self):
tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, name_or_path="test")
texts = ["This is a test", "I was born in 92000, and this is falsé."]
expected_ids_list = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(texts, expected_ids_list):
self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
# Test that decode_fast returns the input text
for text, token_ids in zip(texts, expected_ids_list):
self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def test_tokenizer_integration(self):
sequences = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="AI-Sweden-Models/gpt-sw3-126m",
sequences=sequences,
)
@require_jinja
def test_tokenization_for_chat(self):
tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, name_or_path="test")
tokenizer.chat_template = (
"{{ eos_token }}{{ bos_token }}"
"{% for message in messages %}"
"{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}"
"{% else %}{{ 'Bot: ' + message['content']}}{% endif %}"
"{{ message['text'] }}{{ bos_token }}"
"{% endfor %}"
"Bot:"
)
# This is in English, but it's just here to make sure the chat control tokens are being added properly
test_chats = [
[{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
[
{"role": "system", "content": "You are a helpful chatbot."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Nice to meet you."},
],
[{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}],
]
tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
# fmt: off
expected_tokens = [
[2000, 1, 575, 541, 419, 530, 339, 265, 878, 708, 727, 275, 347, 541, 260, 1, 968, 263, 314, 419, 366, 354, 294, 360, 1, 575, 541, 419],
[2000, 1, 575, 541, 419, 530, 339, 265, 878, 708, 727, 275, 347, 541, 260, 1, 968, 263, 314, 419, 366, 354, 294, 360, 1, 575, 541, 419, 984, 429, 281, 264, 1261, 291, 260, 1, 575, 541, 419],
[2000, 1, 575, 541, 419, 984, 429, 281, 264, 1261, 291, 260, 1, 968, 263, 314, 419, 366, 354, 294, 360, 1, 575, 541, 419]
]
# fmt: on
for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
self.assertListEqual(tokenized_chat, expected_tokens)
| GPTSw3TokenizationTest |
python | walkccc__LeetCode | solutions/225. Implement Stack using Queues/225.py | {
"start": 0,
"end": 350
} | class ____:
def __init__(self):
self.q = collections.deque()
def push(self, x: int) -> None:
self.q.append(x)
for _ in range(len(self.q) - 1):
self.q.append(self.q.popleft())
def pop(self) -> int:
return self.q.popleft()
def top(self) -> int:
return self.q[0]
def empty(self) -> bool:
return not self.q
| MyStack |
python | google__pytype | pytype/abstract/function.py | {
"start": 34416,
"end": 34711
} | class ____(abc.ABC):
"""Wrapper for a function return type."""
@property
@abc.abstractmethod
def name(self):
...
@abc.abstractmethod
def instantiate_parameter(self, node, param_name):
...
@abc.abstractmethod
def get_parameter(self, node, param_name):
...
| _ReturnType |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 519,
"end": 647
} | class ____:
def __init__(self):
pass
def __getattr__(self, key):
return DynamicAccessor()
| DynamicAccessor |
python | huggingface__transformers | tests/models/qwen2_vl/test_modeling_qwen2_vl.py | {
"start": 14840,
"end": 27342
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
self.messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg"
self.image = Image.open(requests.get(url, stream=True).raw)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
@slow
def test_small_model_integration_test(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=[text], images=[self.image], return_tensors="pt")
expected_input_ids = [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 151652, 151655, 151655] # fmt: skip
assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
expected_pixel_slice = torch.tensor(
[
[0.8792, 0.8792, 0.9084],
[1.1858, 1.1858, 1.2296],
[1.2004, 1.2004, 1.2150],
[1.4340, 1.4340, 1.4194],
[1.3902, 1.4048, 1.4194],
[1.5216, 1.5362, 1.5362],
],
dtype=torch.float32,
device="cpu",
)
assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)
# verify generation
inputs = inputs.to(torch_device)
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices"
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to(
torch_device
)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_expand(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=[text], images=[self.image], return_tensors="pt").to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, num_return_sequences=3)
EXPECTED_DECODED_TEXT = [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch_wo_image(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
messages2 = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who are you?"},
]
text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to(
torch_device
)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am a large language model created by Alibaba Cloud. I am called Qwen.'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch_different_resolutions(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
text2 = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
image2 = self.image.resize((224, 224))
inputs = self.processor(text=[text, text2], images=[self.image, image2], padding=True, return_tensors="pt").to(
torch_device
)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
DECODED_TEXT = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_DECODED_TEXTS = Expectations(
{
("xpu", 3): [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
],
("cuda", None): [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets',
],
("cuda", 8): [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices'
],
}
) # fmt: skip
EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation()
self.assertEqual(DECODED_TEXT, EXPECTED_DECODED_TEXT)
@slow
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
def test_small_model_integration_test_batch_flashatt2(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to(
torch_device
)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices",
"system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices",
]
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
def test_small_model_integration_test_batch_wo_image_flashatt2(self):
model = Qwen2VLForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-VL-7B-Instruct",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
messages2 = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who are you?"},
]
text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to(
torch_device
)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices',
'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am a large language model created by Alibaba Cloud. I am called Qwen.'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
| Qwen2VLIntegrationTest |
python | getsentry__sentry | fixtures/safe_migrations_apps/good_flow_delete_field_pending_with_fk_constraint_app/migrations/0003_delete.py | {
"start": 190,
"end": 571
} | class ____(CheckedMigration):
dependencies = [
(
"good_flow_delete_field_pending_with_fk_constraint_app",
"0002_remove_constraints_and_pending",
),
]
operations = [
SafeRemoveField(
model_name="testtable",
name="fk_table",
deletion_action=DeletionAction.DELETE,
),
]
| Migration |
python | huggingface__transformers | tests/models/apertus/test_modeling_apertus.py | {
"start": 1330,
"end": 1463
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = ApertusModel
@require_torch
| ApertusModelTester |
python | scipy__scipy | scipy/fftpack/tests/test_basic.py | {
"start": 9189,
"end": 11199
} | class ____:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x1 = [1,2,3,4,1,2,3,4]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x2 = [1,2,3,4,1,2,3,4,5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.rdt))
y1 = direct_irdft(x)
assert_equal(y.dtype, self.rdt)
assert_array_almost_equal(y,y1, decimal=self.ndec)
assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
_test(x1, x1_1)
_test(x2, x2_1)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = irfft(rfft(x))
y2 = rfft(irfft(x))
assert_equal(y1.dtype, self.rdt)
assert_equal(y2.dtype, self.rdt)
assert_array_almost_equal(y1, x, decimal=self.ndec, err_msg=f"size={size}")
assert_array_almost_equal(y2, x, decimal=self.ndec, err_msg=f"size={size}")
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = irfft(rfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = rfft(irfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, irfft, [])
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
def test_complex_input(self):
assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64))
# self.ndec is bogus; we should have a assert_array_approx_equal for number of
# significant digits
| _TestIRFFTBase |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/resource.py | {
"start": 4786,
"end": 6179
} | class ____(KubernetesResourceBaseOperator):
"""Create a resource in a kubernetes."""
def create_custom_from_yaml_object(self, body: dict):
group, version, namespace, plural = self.get_crd_fields(body)
if self.namespaced:
self.custom_object_client.create_namespaced_custom_object(group, version, namespace, plural, body)
else:
self.custom_object_client.create_cluster_custom_object(group, version, plural, body)
@generic_api_retry
def _create_objects(self, objects):
self.log.info("Starting resource creation")
if not self.custom_resource_definition:
create_from_yaml(
k8s_client=self.client,
yaml_objects=objects,
namespace=self.get_namespace(),
)
else:
k8s_resource_iterator(self.create_custom_from_yaml_object, objects)
def execute(self, context) -> None:
if self.yaml_conf:
self._create_objects(yaml.safe_load_all(self.yaml_conf))
elif self.yaml_conf_file and os.path.exists(self.yaml_conf_file):
with open(self.yaml_conf_file) as stream:
self._create_objects(yaml.safe_load_all(stream))
else:
raise AirflowException("File %s not found", self.yaml_conf_file)
self.log.info("Resource was created")
| KubernetesCreateResourceOperator |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 191928,
"end": 194568
} | class ____(Response):
"""
Response of events.vector_metrics_iter_histogram endpoint.
:param images:
:type images: Sequence[dict]
"""
_service = "events"
_action = "vector_metrics_iter_histogram"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"images": {"items": {"type": "object"}, "type": ["array", "null"]}},
"type": "object",
}
def __init__(self, images: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(VectorMetricsIterHistogramResponse, self).__init__(**kwargs)
self.images = images
@schema_property("images")
def images(self) -> Optional[List[dict]]:
return self._property_images
@images.setter
def images(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_images = None
return
self.assert_isinstance(value, "images", (list, tuple))
self.assert_isinstance(value, "images", (dict,), is_array=True)
self._property_images = value
response_mapping = {
AddRequest: AddResponse,
AddBatchRequest: AddBatchResponse,
DeleteForTaskRequest: DeleteForTaskResponse,
DeleteForModelRequest: DeleteForModelResponse,
DebugImagesRequest: DebugImagesResponse,
PlotsRequest: PlotsResponse,
GetDebugImageSampleRequest: GetDebugImageSampleResponse,
NextDebugImageSampleRequest: NextDebugImageSampleResponse,
GetPlotSampleRequest: GetPlotSampleResponse,
NextPlotSampleRequest: NextPlotSampleResponse,
GetTaskMetricsRequest: GetTaskMetricsResponse,
GetTaskLogRequest: GetTaskLogResponse,
GetTaskEventsRequest: GetTaskEventsResponse,
DownloadTaskLogRequest: DownloadTaskLogResponse,
GetTaskPlotsRequest: GetTaskPlotsResponse,
GetMultiTaskPlotsRequest: GetMultiTaskPlotsResponse,
GetVectorMetricsAndVariantsRequest: GetVectorMetricsAndVariantsResponse,
VectorMetricsIterHistogramRequest: VectorMetricsIterHistogramResponse,
ScalarMetricsIterHistogramRequest: ScalarMetricsIterHistogramResponse,
MultiTaskScalarMetricsIterHistogramRequest: MultiTaskScalarMetricsIterHistogramResponse,
GetTaskSingleValueMetricsRequest: GetTaskSingleValueMetricsResponse,
GetTaskLatestScalarValuesRequest: GetTaskLatestScalarValuesResponse,
GetScalarMetricsAndVariantsRequest: GetScalarMetricsAndVariantsResponse,
GetScalarMetricDataRequest: GetScalarMetricDataResponse,
ScalarMetricsIterRawRequest: ScalarMetricsIterRawResponse,
ClearScrollRequest: ClearScrollResponse,
ClearTaskLogRequest: ClearTaskLogResponse,
}
| VectorMetricsIterHistogramResponse |
python | getsentry__sentry | tests/sentry/models/test_groupresolution.py | {
"start": 171,
"end": 9761
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.old_release = self.create_release(
version="a", date_added=timezone.now() - timedelta(minutes=30)
)
self.new_release = self.create_release(version="b")
self.group = self.create_group()
self.old_semver_release = self.create_release(version="foo_package@1.0")
self.new_semver_release = self.create_release(version="foo_package@2.0")
def test_in_next_release_with_new_release(self) -> None:
GroupResolution.objects.create(
release=self.old_release, group=self.group, type=GroupResolution.Type.in_next_release
)
assert not GroupResolution.has_resolution(self.group, self.new_release)
def test_in_next_release_with_same_release(self) -> None:
GroupResolution.objects.create(
release=self.old_release, group=self.group, type=GroupResolution.Type.in_next_release
)
assert GroupResolution.has_resolution(self.group, self.old_release)
def test_in_next_release_with_old_release(self) -> None:
GroupResolution.objects.create(
release=self.new_release, group=self.group, type=GroupResolution.Type.in_next_release
)
assert GroupResolution.has_resolution(self.group, self.old_release)
def test_for_semver_when_current_release_version_is_set_with_new_semver_release(self) -> None:
# Behaviour should be the same in both `in_release` and `in_next_release` because if
# `current_release_version` is set then comparison will be > current_release_version
# should not have a resolution
for grp_res_type in [GroupResolution.Type.in_release, GroupResolution.Type.in_next_release]:
grp_resolution = GroupResolution.objects.create(
release=self.old_semver_release,
current_release_version=self.old_semver_release.version,
group=self.group,
type=grp_res_type,
)
assert not GroupResolution.has_resolution(self.group, self.new_semver_release)
grp_resolution.delete()
def test_for_semver_when_current_release_version_is_set_with_same_release(self) -> None:
for grp_res_type in [GroupResolution.Type.in_release, GroupResolution.Type.in_next_release]:
grp_resolution = GroupResolution.objects.create(
release=self.old_semver_release,
current_release_version=self.old_semver_release.version,
group=self.group,
type=grp_res_type,
)
assert GroupResolution.has_resolution(self.group, self.old_semver_release)
grp_resolution.delete()
def test_for_semver_when_current_release_version_is_set_with_old_semver_release(self) -> None:
for grp_res_type in [GroupResolution.Type.in_release, GroupResolution.Type.in_next_release]:
grp_resolution = GroupResolution.objects.create(
release=self.new_semver_release,
current_release_version=self.new_semver_release.version,
group=self.group,
type=grp_res_type,
)
assert GroupResolution.has_resolution(self.group, self.old_semver_release)
grp_resolution.delete()
def test_when_current_release_version_is_set_with_new_release(self) -> None:
for grp_res_type in [GroupResolution.Type.in_release, GroupResolution.Type.in_next_release]:
grp_resolution = GroupResolution.objects.create(
release=self.old_release,
current_release_version=self.old_release.version,
group=self.group,
type=grp_res_type,
)
assert not GroupResolution.has_resolution(self.group, self.new_release)
grp_resolution.delete()
def test_when_current_release_version_is_set_with_same_release(self) -> None:
for grp_res_type in [GroupResolution.Type.in_release, GroupResolution.Type.in_next_release]:
grp_resolution = GroupResolution.objects.create(
release=self.old_release,
current_release_version=self.old_release.version,
group=self.group,
type=grp_res_type,
)
assert GroupResolution.has_resolution(self.group, self.old_release)
grp_resolution.delete()
def test_when_current_release_version_is_set_with_old_release(self) -> None:
for grp_res_type in [GroupResolution.Type.in_release, GroupResolution.Type.in_next_release]:
grp_resolution = GroupResolution.objects.create(
release=self.new_release,
current_release_version=self.new_release.version,
group=self.group,
type=grp_res_type,
)
assert GroupResolution.has_resolution(self.group, self.old_release)
grp_resolution.delete()
def test_when_current_release_version_is_set_incorrect_inputs_fallback_to_older_model(
self,
) -> None:
"""
Test that ensures in a project that follows semver and where current_release_version is
set, wrong release input (non semver) comparison does not break the method, but rather
fallsback to the older model of comparison
"""
old_random_release = self.create_release(
date_added=timezone.now() - timedelta(minutes=45), version="doggo"
)
GroupResolution.objects.create(
release=old_random_release,
current_release_version=old_random_release.version,
group=self.group,
type=GroupResolution.Type.in_next_release,
)
for release in [
self.old_release,
self.new_release,
self.old_semver_release,
self.new_semver_release,
]:
assert not GroupResolution.has_resolution(self.group, release)
def test_when_current_release_version_is_set_but_does_not_exist_fallback_to_older_model(
self,
) -> None:
"""
Test that ensures in a project that does not follows semver, and current_release_version
is set but no corresponding Release instance exists for that release version then
comparison does not break the method, but rather fallsback to the older model
"""
GroupResolution.objects.create(
release=self.old_release,
current_release_version="kittie 12",
group=self.group,
type=GroupResolution.Type.in_next_release,
)
for release in [self.new_release, self.old_semver_release, self.new_semver_release]:
assert not GroupResolution.has_resolution(self.group, release)
def test_in_release_with_new_release(self) -> None:
GroupResolution.objects.create(
release=self.old_release, group=self.group, type=GroupResolution.Type.in_release
)
assert not GroupResolution.has_resolution(self.group, self.new_release)
def test_in_release_with_current_release(self) -> None:
GroupResolution.objects.create(
release=self.old_release, group=self.group, type=GroupResolution.Type.in_release
)
assert not GroupResolution.has_resolution(self.group, self.old_release)
def test_in_release_with_old_release(self) -> None:
GroupResolution.objects.create(
release=self.new_release, group=self.group, type=GroupResolution.Type.in_release
)
assert GroupResolution.has_resolution(self.group, self.old_release)
def test_for_semver_in_release_with_new_release(self) -> None:
GroupResolution.objects.create(
release=self.old_semver_release, group=self.group, type=GroupResolution.Type.in_release
)
assert not GroupResolution.has_resolution(self.group, self.new_semver_release)
def test_for_semver_in_release_with_current_release(self) -> None:
GroupResolution.objects.create(
release=self.old_semver_release, group=self.group, type=GroupResolution.Type.in_release
)
assert not GroupResolution.has_resolution(self.group, self.old_semver_release)
def test_for_semver_in_release_with_old_release(self) -> None:
GroupResolution.objects.create(
release=self.new_semver_release, group=self.group, type=GroupResolution.Type.in_release
)
assert GroupResolution.has_resolution(self.group, self.old_semver_release)
def test_no_release_with_resolution(self) -> None:
GroupResolution.objects.create(
release=self.new_release, group=self.group, type=GroupResolution.Type.in_release
)
assert GroupResolution.has_resolution(self.group, None)
def test_no_release_with_no_resolution(self) -> None:
assert not GroupResolution.has_resolution(self.group, None)
def test_all_resolutions_are_implemented(self) -> None:
resolution_types = [
attr for attr in vars(GroupResolution.Type) if not attr.startswith("__")
]
for resolution_type in resolution_types:
resolution = GroupResolution.objects.create(
release=self.new_release,
group=self.group,
type=getattr(GroupResolution.Type, resolution_type),
)
assert (
GroupResolution.has_resolution(self.group, self.old_release) is not NotImplemented
)
resolution.delete()
| GroupResolutionTest |
python | hynek__structlog | tests/test_config.py | {
"start": 10963,
"end": 13276
} | class ____:
def test_wrap_passes_args(self):
"""
wrap_logger propagates all arguments to the wrapped bound logger.
"""
logger = object()
p = wrap_logger(logger, processors=[1, 2, 3], context_class=dict)
assert logger is p._logger
assert [1, 2, 3] == p._processors
assert dict is p._context_class
def test_empty_processors(self):
"""
An empty list is a valid value for processors so it must be preserved.
"""
# We need to do a bind such that we get an actual logger and not just
# a lazy proxy.
logger = wrap_logger(object(), processors=[]).new()
assert [] == logger._processors
def test_wrap_returns_proxy(self):
"""
wrap_logger always returns a lazy proxy.
"""
assert isinstance(wrap_logger(None), BoundLoggerLazyProxy)
def test_configure_once_issues_warning_on_repeated_call(self):
"""
configure_once raises a warning when it's after configuration.
"""
with warnings.catch_warnings(record=True) as warns:
configure_once()
assert 0 == len(warns)
with warnings.catch_warnings(record=True) as warns:
configure_once()
assert 1 == len(warns)
assert RuntimeWarning is warns[0].category
assert "Repeated configuration attempted." == warns[0].message.args[0]
def test_get_logger_configures_according_to_config(self):
"""
get_logger returns a correctly configured bound logger.
"""
b = get_logger().bind()
assert isinstance(
b._logger, _BUILTIN_DEFAULT_LOGGER_FACTORY().__class__
)
assert _BUILTIN_DEFAULT_PROCESSORS == b._processors
assert isinstance(b, _BUILTIN_DEFAULT_WRAPPER_CLASS)
assert _BUILTIN_DEFAULT_CONTEXT_CLASS == b._context.__class__
def test_get_logger_passes_positional_arguments_to_logger_factory(self):
"""
Ensure `get_logger` passes optional positional arguments through to
the logger factory.
"""
factory = call_recorder(lambda *args: object())
configure(logger_factory=factory)
get_logger("test").bind(x=42)
assert [call("test")] == factory.call_args_list
| TestFunctions |
python | scikit-learn__scikit-learn | sklearn/externals/_packaging/_structures.py | {
"start": 2196,
"end": 2922
} | class ____:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __ne__(self, other: object) -> bool:
return not isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
| NegativeInfinityType |
python | doocs__leetcode | solution/3200-3299/3290.Maximum Multiplication Score/Solution.py | {
"start": 0,
"end": 358
} | class ____:
def maxScore(self, a: List[int], b: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> int:
if j >= len(b):
return 0 if i >= len(a) else -inf
if i >= len(a):
return 0
return max(dfs(i, j + 1), a[i] * b[j] + dfs(i + 1, j + 1))
return dfs(0, 0)
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/components/dbt_project/component.py | {
"start": 2320,
"end": 3696
} | class ____(dg.Resolvable):
"""Aligns with DbtProject.__new__."""
project_dir: str
target_path: Optional[str] = None
profiles_dir: Optional[str] = None
profile: Optional[str] = None
target: Optional[str] = None
packaged_project_dir: Optional[str] = None
state_path: Optional[str] = None
def resolve_dbt_project(context: ResolutionContext, model) -> DbtProjectManager:
if isinstance(model, RemoteGitDbtProjectManager.model()):
return RemoteGitDbtProjectManager.resolve_from_model(context, model)
args = (
DbtProjectArgs(project_dir=context.resolve_value(model, as_type=str))
if isinstance(model, str)
else DbtProjectArgs.resolve_from_model(context, model)
)
# resolve the project_dir relative to where this component is defined
args = replace(args, project_dir=context.resolve_source_relative_path(args.project_dir))
return DbtProjectArgsManager(args)
DbtMetadataAddons: TypeAlias = Literal["column_metadata", "row_count"]
_resolution_context: ContextVar[ResolutionContext] = ContextVar("resolution_context")
@contextmanager
def _set_resolution_context(context: ResolutionContext):
token = _resolution_context.set(context)
try:
yield
finally:
_resolution_context.reset(token)
@public
@scaffold_with(DbtProjectComponentScaffolder)
@dataclass
| DbtProjectArgs |
python | sqlalchemy__sqlalchemy | test/sql/test_delete.py | {
"start": 7270,
"end": 12212
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(30)),
Column("description", String(50)),
)
Table(
"myothertable",
metadata,
Column("otherid", Integer),
Column("othername", String(30)),
)
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("name", String(30), nullable=False),
Column("email_address", String(50), nullable=False),
)
Table(
"dingalings",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("address_id", None, ForeignKey("addresses.id")),
Column("data", String(30)),
)
Table(
"update_w_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("ycol", Integer, key="y"),
Column("data", String(30), onupdate=lambda: "hi"),
)
@classmethod
def fixtures(cls):
return dict(
users=(
("id", "name"),
(7, "jack"),
(8, "ed"),
(9, "fred"),
(10, "chuck"),
),
addresses=(
("id", "user_id", "name", "email_address"),
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed@wood.com"),
(3, 8, "x", "ed@bettyboop.com"),
(4, 8, "x", "ed@lala.com"),
(5, 9, "x", "fred@fred.com"),
),
dingalings=(
("id", "address_id", "data"),
(1, 2, "ding 1/2"),
(2, 5, "ding 2/5"),
),
)
@testing.requires.delete_using
def test_exec_two_table(self, connection):
users, addresses = self.tables.users, self.tables.addresses
dingalings = self.tables.dingalings
connection.execute(dingalings.delete()) # fk violation otherwise
connection.execute(
addresses.delete()
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
expected = [
(1, 7, "x", "jack@bean.com"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_table(connection, addresses, expected)
@testing.requires.delete_using
def test_exec_three_table(self, connection):
users = self.tables.users
addresses = self.tables.addresses
dingalings = self.tables.dingalings
connection.execute(
dingalings.delete()
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(addresses.c.id == dingalings.c.address_id)
)
expected = [(2, 5, "ding 2/5")]
self._assert_table(connection, dingalings, expected)
@testing.requires.delete_using
def test_exec_two_table_plus_alias(self, connection):
users, addresses = self.tables.users, self.tables.addresses
dingalings = self.tables.dingalings
connection.execute(dingalings.delete()) # fk violation otherwise
a1 = addresses.alias()
connection.execute(
addresses.delete()
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(a1.c.id == addresses.c.id)
)
expected = [(1, 7, "x", "jack@bean.com"), (5, 9, "x", "fred@fred.com")]
self._assert_table(connection, addresses, expected)
@testing.requires.delete_using
def test_exec_alias_plus_table(self, connection):
users, addresses = self.tables.users, self.tables.addresses
dingalings = self.tables.dingalings
d1 = dingalings.alias()
connection.execute(
delete(d1)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(addresses.c.id == d1.c.address_id)
)
expected = [(2, 5, "ding 2/5")]
self._assert_table(connection, dingalings, expected)
def _assert_table(self, connection, table, expected):
stmt = table.select().order_by(table.c.id)
eq_(connection.execute(stmt).fetchall(), expected)
| DeleteFromRoundTripTest |
python | pytorch__pytorch | test/inductor/test_device_assert.py | {
"start": 539,
"end": 3298
} | class ____(TestCase):
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_assert_should_throw(self, backend):
def func():
a = torch.tensor([1.0, -2.0], device="cpu")
result = torch.all(a > 0)
assert result, "should throw"
def func_inline():
a = torch.tensor([1.0, -2.0], device="cpu")
assert torch.all(a > 0), "should throw"
with self.assertRaisesRegex(RuntimeError, "should throw"):
torch._dynamo.reset()
f_c = torch.compile(func, backend=backend)
f_c()
with self.assertRaisesRegex(RuntimeError, "should throw"):
torch._dynamo.reset()
f_c = torch.compile(func_inline, backend=backend)
f_c()
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_assert_should_not_throw(self, backend):
def func():
a = torch.tensor([1.0, 2.0], device="cpu")
result = torch.all(a > 0)
assert result, "should throw"
def func_inline():
a = torch.tensor([1.0, 2.0], device="cpu")
assert torch.all(a > 0), "should throw"
torch._dynamo.reset()
f_c = torch.compile(func, backend=backend)
f_c()
torch._dynamo.reset()
f_c = torch.compile(func_inline, backend=backend)
f_c()
@requires_gpu_and_triton
@skipIfRocm
@torch._inductor.config.patch(force_disable_caches=True)
def test_assert_fusion(self):
torch._logging.set_logs(inductor_metrics=True)
def func():
a = torch.tensor([1.0, 2.0], device=device_type)
result = torch.all(a > 0)
assert result, "should throw"
torch._dynamo.reset()
f_c = torch.compile(func, backend="inductor")
metrics.reset()
self.assertEqual(metrics.generated_kernel_count, 0)
f_c()
self.assertEqual(metrics.generated_kernel_count, 1)
torch._logging.set_logs()
@requires_gpu_and_triton
@skipIfRocm
@torch._inductor.config.patch(force_disable_caches=True)
def test_run_assert_triton(self):
@torch.compile(backend="inductor")
def fn():
a = torch.tensor([1.0, 2.0], device=device_type)
result = torch.all(a > 0)
assert result, "should throw"
def should_not_throw(fn):
try:
fn()
return True
except Exception:
return False
self.assertEqual(should_not_throw(fn), True)
_, code = run_and_get_code(fn)
self.assertEqual(code[0].count("tl.device_assert"), 1)
if __name__ == "__main__":
run_tests()
| TestTorchDeviceAssertTrigger |
python | matplotlib__matplotlib | lib/matplotlib/collections.py | {
"start": 49974,
"end": 53163
class ____(_CollectionWithSizes):
    """A collection of polygons, each rendered as one `~matplotlib.path.Path`."""
    def __init__(self, verts, sizes=None, *, closed=True, **kwargs):
        """
        Parameters
        ----------
        verts : list of array-like
            The sequence of polygons [*verts0*, *verts1*, ...] where each
            element *verts_i* defines the vertices of polygon *i* as a 2D
            array-like of shape (M, 2).
        sizes : array-like, default: None
            Squared scaling factors for the polygons. The coordinates of each
            polygon *verts_i* are multiplied by the square-root of the
            corresponding entry in *sizes* (i.e., *sizes* specify the scaling
            of areas). The scaling is applied before the Artist master
            transform.
        closed : bool, default: True
            Whether the polygon should be closed by adding a CLOSEPOLY
            connection at the end.
        **kwargs
            Forwarded to `.Collection`.
        """
        super().__init__(**kwargs)
        self.set_sizes(sizes)
        self.set_verts(verts, closed)
        self.stale = True
    def set_verts(self, verts, closed=True):
        """
        Set the vertices of the polygons.

        Parameters
        ----------
        verts : list of array-like
            The sequence of polygons [*verts0*, *verts1*, ...] where each
            element *verts_i* defines the vertices of polygon *i* as a 2D
            array-like of shape (M, 2).
        closed : bool, default: True
            Whether the polygon should be closed by adding a CLOSEPOLY
            connection at the end.
        """
        self.stale = True
        # Masked vertices become nan so Path treats them as missing points.
        if isinstance(verts, np.ma.MaskedArray):
            verts = verts.astype(float).filled(np.nan)
        # No need to do anything fancy if the path isn't closed.
        if not closed:
            self._paths = [mpath.Path(xy) for xy in verts]
            return
        # Fast path for arrays: a single (N, M, 2) ndarray of same-length polygons.
        if isinstance(verts, np.ndarray) and len(verts.shape) == 3 and verts.size:
            # Append each polygon's first vertex to close it.
            verts_pad = np.concatenate((verts, verts[:, :1]), axis=1)
            # It's faster to create the codes and internal flags once in a
            # template path and reuse them.
            template_path = mpath.Path(verts_pad[0], closed=True)
            codes = template_path.codes
            _make_path = mpath.Path._fast_from_codes_and_verts
            self._paths = [_make_path(xy, codes, internals_from=template_path)
                           for xy in verts_pad]
            return
        # General case: heterogeneous polygon lengths, one Path at a time.
        self._paths = []
        for xy in verts:
            if len(xy):
                self._paths.append(mpath.Path._create_closed(xy))
            else:
                # An empty vertex list cannot be closed; store an empty path.
                self._paths.append(mpath.Path(xy))
    # Alias required by the `Collection` API: paths and verts coincide here.
    set_paths = set_verts
    def set_verts_and_codes(self, verts, codes):
        """Initialize vertices with path codes."""
        if len(verts) != len(codes):
            raise ValueError("'codes' must be a 1D list or array "
                             "with the same length of 'verts'")
        # Codes are only meaningful for non-empty vertex arrays.
        self._paths = [mpath.Path(xy, cds) if len(xy) else mpath.Path(xy)
                       for xy, cds in zip(verts, codes)]
        self.stale = True
| PolyCollection |
python | getsentry__sentry | src/sentry/monitors/processing_errors/manager.py | {
"start": 937,
"end": 8238
class ____(Exception):
    """Raised when a stored processing error does not belong to the given project."""
    pass
def _get_cluster() -> RedisCluster | StrictRedis[str]:
    """Return the Redis cluster that stores monitor check-in processing errors."""
    return redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
def build_set_identifier(entity_identifier: str) -> str:
    """Redis key of the sorted set holding error ids for one entity."""
    return "monitors.processing_errors_set." + entity_identifier
def build_error_identifier(uuid: uuid.UUID) -> str:
    """Redis key under which one serialized processing error is stored."""
    return "monitors.processing_errors.%s" % uuid.hex
def build_monitor_identifier(monitor: Monitor) -> str:
    """Entity identifier for errors attached to a specific monitor."""
    return "monitor:%s" % (monitor.id,)
def build_project_identifier(project_id: int) -> str:
    """Entity identifier for errors that could not be tied to a monitor."""
    return "project:{}".format(project_id)
def _get_entity_identifier_from_error(
    error: CheckinProcessingError,
    monitor: Monitor | None = None,
) -> str:
    """Return the entity identifier (``monitor:<id>`` or ``project:<id>``) for an error.

    Falls back to a project-level identifier when the monitor is unknown and
    cannot be looked up from the check-in payload.
    """
    if monitor is None:
        # Attempt to get the monitor from the checkin info if we failed to retrieve it during ingestion
        try:
            monitor = Monitor.objects.get(
                project_id=error.checkin.message["project_id"],
                slug=error.checkin.payload["monitor_slug"],
            )
        except Monitor.DoesNotExist:
            pass
    if monitor:
        entity_identifier = build_monitor_identifier(monitor)
    else:
        entity_identifier = build_project_identifier(error.checkin.message["project_id"])
    return entity_identifier
def _get_for_entities(entity_identifiers: list[str]) -> list[CheckinProcessingError]:
    """Fetch stored processing errors for the given entities, newest first."""
    redis = _get_cluster()
    pipeline = redis.pipeline()
    for identifier in entity_identifiers:
        pipeline.zrange(build_set_identifier(identifier), 0, MAX_ERRORS_PER_SET, desc=True)
    error_identifiers = [
        build_error_identifier(uuid.UUID(error_identifier))
        for error_identifier in chain(*pipeline.execute())
    ]
    errors = [
        CheckinProcessingError.from_dict(json.loads(raw_error))
        for raw_error in redis.mget(error_identifiers)
        # Values can expire between the zrange and the mget; skip those.
        if raw_error is not None
    ]
    # Merge results from all entities into one newest-first list.
    errors.sort(key=lambda error: error.checkin.ts.timestamp(), reverse=True)
    return errors
def _delete_for_entity(entity_identifier: str, uuid: uuid.UUID) -> None:
    """Remove one processing error (both set member and value key) for an entity."""
    pipeline = _get_cluster().pipeline()
    pipeline.zrem(build_set_identifier(entity_identifier), uuid.hex)
    pipeline.delete(build_error_identifier(uuid))
    pipeline.execute()
def _delete_for_entity_by_type(entity_identifier: str, type: ProcessingErrorType) -> None:
    """Delete all errors of ``type`` for an entity.

    Processing errors that also contain other error types are rewritten with
    the matching entries filtered out instead of being removed entirely.
    """
    checkin_errors = _get_for_entities([entity_identifier])
    redis = _get_cluster()
    pipeline = redis.pipeline()
    for checkin_error in checkin_errors:
        errors = checkin_error.errors
        if not any(error["type"] == type for error in errors):
            continue
        # If the processing error only holds this one type of error, remove the whole error
        if len(errors) == 1:
            pipeline.zrem(build_set_identifier(entity_identifier), checkin_error.id.hex)
            pipeline.delete(build_error_identifier(checkin_error.id))
        # If the processing error has other errors, filter out the matching error and update the redis value
        else:
            filtered_errors = list(filter(lambda error: error["type"] != type, errors))
            new_checkin_error = CheckinProcessingError(
                filtered_errors, checkin_error.checkin, id=checkin_error.id
            )
            new_serialized_checkin_error = json.dumps(new_checkin_error.to_dict())
            error_key = build_error_identifier(checkin_error.id)
            pipeline.set(error_key, new_serialized_checkin_error, ex=MONITOR_ERRORS_LIFETIME)
    pipeline.execute()
def store_error(error: CheckinProcessingError, monitor: Monitor | None):
    """Persist a processing error in Redis, evicting the oldest entries so the
    per-entity list never exceeds ``MAX_ERRORS_PER_SET``."""
    entity_identifier = _get_entity_identifier_from_error(error, monitor)
    error_set_key = build_set_identifier(entity_identifier)
    error_key = build_error_identifier(error.id)
    serialized_error = json.dumps(error.to_dict())
    redis_client = _get_cluster()
    pipeline = redis_client.pipeline(transaction=False)
    # Score by check-in timestamp so the set is ordered oldest -> newest.
    pipeline.zadd(error_set_key, {error.id.hex: error.checkin.ts.timestamp()})
    pipeline.set(error_key, serialized_error, ex=MONITOR_ERRORS_LIFETIME)
    pipeline.expire(error_set_key, MONITOR_ERRORS_LIFETIME)
    pipeline.zrange(error_set_key, 0, -(MAX_ERRORS_PER_SET + 1))
    processing_errors_to_remove = pipeline.execute()[-1]
    # Cap the error list to the `MAX_ERRORS_PER_SET` most recent errors
    if processing_errors_to_remove:
        pipeline = redis_client.pipeline(transaction=False)
        # XXX: We need to make individual delete commands here since pipeline
        # doesn't support passing multiple identifiers to delete
        for result in processing_errors_to_remove:
            pipeline.delete(build_error_identifier(uuid.UUID(result)))
        pipeline.zrem(error_set_key, *processing_errors_to_remove)
        pipeline.execute()
def delete_error(project: Project, uuid: uuid.UUID):
    """Delete one stored processing error after verifying it belongs to ``project``.

    Raises:
        InvalidProjectError: if the stored error's project does not match.
    """
    error_identifier = build_error_identifier(uuid)
    redis = _get_cluster()
    raw_error = redis.get(error_identifier)
    if raw_error is None:
        # Already expired or deleted; nothing to do.
        return
    error = CheckinProcessingError.from_dict(json.loads(raw_error))
    if error.checkin.message["project_id"] != project.id:
        # TODO: Better exception class
        raise InvalidProjectError()
    entity_identifier = _get_entity_identifier_from_error(error)
    _delete_for_entity(entity_identifier, uuid)
def delete_errors_for_monitor_by_type(monitor: Monitor, type: ProcessingErrorType):
    """Delete all stored errors of ``type`` attached to a specific monitor."""
    _delete_for_entity_by_type(build_monitor_identifier(monitor), type)
def delete_errors_for_project_by_type(project: Project, type: ProcessingErrorType):
    """Delete all stored errors of ``type`` attached to a project."""
    _delete_for_entity_by_type(build_project_identifier(project.id), type)
def get_errors_for_monitor(monitor: Monitor) -> list[CheckinProcessingError]:
    """Return stored processing errors for one monitor, newest first."""
    return _get_for_entities([build_monitor_identifier(monitor)])
def get_errors_for_projects(projects: list[Project]) -> list[CheckinProcessingError]:
    """Return stored processing errors across the given projects, newest first."""
    return _get_for_entities([build_project_identifier(project.id) for project in projects])
def handle_processing_errors(item: CheckinItem, error: ProcessingErrorsException):
    """Record a failed check-in: emit metrics, sampled analytics, and persist
    the processing error.

    Never raises; any bookkeeping failure is logged so ingestion continues.
    """
    try:
        project = Project.objects.get_from_cache(id=item.message["project_id"])
        organization = Organization.objects.get_from_cache(id=project.organization_id)
        metrics.incr(
            "monitors.checkin.handle_processing_error",
            tags={
                "source": "consumer",
                "sdk_platform": item.message["sdk"],
            },
        )
        # Analytics events are sampled to keep volume bounded.
        if random.random() < ANALYTICS_SAMPLING_RATE:
            try:
                analytics.record(
                    CheckinProcessingErrorStored(
                        organization_id=organization.id,
                        project_id=project.id,
                        monitor_slug=item.payload["monitor_slug"],
                        error_types=[
                            process_error["type"].value for process_error in error.processing_errors
                        ],
                    )
                )
            except Exception as e:
                # Analytics failures must not prevent storing the error.
                sentry_sdk.capture_exception(e)
        checkin_processing_error = CheckinProcessingError(error.processing_errors, item)
        store_error(checkin_processing_error, error.monitor)
    except Exception:
        logger.exception("Failed to log processing error")
| InvalidProjectError |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/components/workspace_component/component.py | {
"start": 1283,
"end": 1665
class ____(pydantic.BaseModel):
    """Account id and API credentials used to connect to a Fivetran instance."""
    account_id: str = pydantic.Field(..., description="The Fivetran account ID.")
    api_key: str = pydantic.Field(
        ..., description="API key used to authenticate to a Fivetran instance."
    )
    api_secret: str = pydantic.Field(
        ..., description="API secret used to authenticate to a Fivetran instance."
    )
| FivetranWorkspaceModel |
python | numpy__numpy | numpy/_core/tests/test_scalarmath.py | {
"start": 11393,
"end": 16075
class ____:
    """Tests for scalar modulus / divmod across integer and float dtypes.

    Verifies the identity ``div * b + rem == a``, that the remainder takes
    the sign of the divisor, exactness for small integers, roundoff
    behavior, and nan/inf corner cases.
    """
    def test_modulus_basic(self):
        # Exercise every (dtype, dtype, sign, sign) combination with small,
        # exactly representable operands.
        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
        for op in [floordiv_and_mod, divmod]:
            for dt1, dt2 in itertools.product(dt, dt):
                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
                    a = np.array(sg1 * 71, dtype=dt1)[()]
                    b = np.array(sg2 * 19, dtype=dt2)[()]
                    div, rem = op(a, b)
                    assert_equal(div * b + rem, a, err_msg=msg)
                    # The remainder takes the sign of the divisor.
                    if sg2 == -1:
                        assert_(b < rem <= 0, msg)
                    else:
                        assert_(b > rem >= 0, msg)
    def test_float_modulus_exact(self):
        # test that float results are exact for small integers. This also
        # holds for the same integers scaled by powers of two.
        nlst = list(range(-127, 0))
        plst = list(range(1, 128))
        dividend = nlst + [0] + plst
        divisor = nlst + plst
        arg = list(itertools.product(dividend, divisor))
        tgt = [divmod(*t) for t in arg]
        a, b = np.array(arg, dtype=int).T
        # convert exact integer results from Python to float so that
        # signed zero can be used, it is checked.
        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
        for op in [floordiv_and_mod, divmod]:
            for dt in np.typecodes['Float']:
                msg = f'op: {op.__name__}, dtype: {dt}'
                fa = a.astype(dt)
                fb = b.astype(dt)
                # use list comprehension so a_ and b_ are scalars
                div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
                assert_equal(div, tgtdiv, err_msg=msg)
                assert_equal(rem, tgtrem, err_msg=msg)
    def test_float_modulus_roundoff(self):
        # gh-6127
        dt = np.typecodes['Float']
        for op in [floordiv_and_mod, divmod]:
            for dt1, dt2 in itertools.product(dt, dt):
                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
                    a = np.array(sg1 * 78 * 6e-8, dtype=dt1)[()]
                    b = np.array(sg2 * 6e-8, dtype=dt2)[()]
                    div, rem = op(a, b)
                    # Equal assertion should hold when fmod is used
                    assert_equal(div * b + rem, a, err_msg=msg)
                    if sg2 == -1:
                        assert_(b < rem <= 0, msg)
                    else:
                        assert_(b > rem >= 0, msg)
    def test_float_modulus_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = operator.mod(a, b)
            assert_(rem <= b, f'dt: {dt}')
            rem = operator.mod(-a, -b)
            assert_(rem >= -b, f'dt: {dt}')
        # Check nans, inf
        with warnings.catch_warnings(), np.errstate(all='ignore'):
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = operator.mod(fone, fzer)
                assert_(np.isnan(rem), f'dt: {dt}')
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = operator.mod(fone, finf)
                #assert_(rem == fone, 'dt: %s' % dt)
                rem = operator.mod(fone, fnan)
                assert_(np.isnan(rem), f'dt: {dt}')
                rem = operator.mod(finf, fone)
                assert_(np.isnan(rem), f'dt: {dt}')
                for op in [floordiv_and_mod, divmod]:
                    div, mod = op(fone, fzer)
                    # BUG FIX: this was `assert_(np.isinf(div)) and
                    # assert_(np.isnan(mod))`. `assert_` returns None, so the
                    # `and` short-circuited and the second assertion never ran.
                    assert_(np.isinf(div))
                    assert_(np.isnan(mod))
    def test_inplace_floordiv_handling(self):
        # issue gh-12927
        # this only applies to in-place floordiv //=, because the output type
        # promotes to float which does not fit
        a = np.array([1, 2], np.int64)
        b = np.array([1, 2], np.uint64)
        with pytest.raises(TypeError,
                           match=r"Cannot cast ufunc 'floor_divide' output from"):
            a //= b
| TestModulus |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 33034,
"end": 33267
class ____(Response):
    """
    Response of events.add endpoint.
    """
    _service = "events"
    _action = "add"
    _version = "2.9"
    # Open schema: the endpoint may return arbitrary additional properties.
    _schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
| AddResponse |
python | getsentry__sentry | tests/snuba/api/serializers/test_group_stream.py | {
"start": 637,
"end": 10792
class ____(APITestCase, BaseMetricsTestCase):
    """Tests for ``StreamGroupSerializerSnuba``: environment filtering of tsdb
    stats, the ``sessions`` expand, and the skipping of date/timestamp search
    filters."""
    def test_environment(self) -> None:
        """Environment ids passed to the serializer must be forwarded to tsdb."""
        group = self.group
        organization_id = group.project.organization_id
        environment = Environment.get_or_create(group.project, "production")
        with mock.patch(
            "sentry.api.serializers.models.group_stream.snuba_tsdb.get_range",
            side_effect=snuba_tsdb.get_range,
        ) as get_range:
            serialize(
                [group],
                serializer=StreamGroupSerializerSnuba(
                    environment_ids=[environment.id],
                    stats_period="14d",
                    organization_id=organization_id,
                ),
                request=self.make_request(),
            )
            assert get_range.call_count == 1
            for args, kwargs in get_range.call_args_list:
                assert kwargs["environment_ids"] == [environment.id]
        # Without environments, get_range must be called with environment_ids=None.
        with mock.patch(
            "sentry.api.serializers.models.group.snuba_tsdb.get_range",
            side_effect=snuba_tsdb.get_range,
        ) as get_range:
            serialize(
                [group],
                serializer=StreamGroupSerializerSnuba(
                    environment_ids=None, stats_period="14d", organization_id=organization_id
                ),
                request=self.make_request(),
            )
            assert get_range.call_count == 1
            for args, kwargs in get_range.call_args_list:
                assert kwargs["environment_ids"] is None
    @pytest.mark.xfail(reason="Does not work with the metrics release health backend")
    def test_session_count(self) -> None:
        """`sessionCount` is only present with the ``sessions`` expand and is
        filtered by environment and time range."""
        group = self.group
        organization_id = group.project.organization_id
        environment = Environment.get_or_create(group.project, "prod")
        dev_environment = Environment.get_or_create(group.project, "dev")
        no_sessions_environment = Environment.get_or_create(group.project, "no_sessions")
        self.received = time.time()
        self.session_started = time.time() // 60 * 60
        self.session_release = "foo@1.0.0"
        self.session_crashed_release = "foo@2.0.0"
        # One session in "dev", two distinct sessions in "prod" (one ok->exited,
        # one crashed); "no_sessions" receives nothing.
        self.store_session(
            {
                "session_id": "5d52fd05-fcc9-4bf3-9dc9-267783670341",
                "distinct_id": "39887d89-13b2-4c84-8c23-5d13d2102667",
                "status": "ok",
                "seq": 0,
                "release": self.session_release,
                "environment": "dev",
                "retention_days": 90,
                "org_id": self.project.organization_id,
                "project_id": self.project.id,
                "duration": 1,
                "errors": 0,
                "started": self.session_started - 120,
                "received": self.received - 120,
            }
        )
        self.store_session(
            {
                "session_id": "5e910c1a-6941-460e-9843-24103fb6a63c",
                "distinct_id": "39887d89-13b2-4c84-8c23-5d13d2102668",
                "status": "ok",
                "seq": 0,
                "release": self.session_release,
                "environment": "prod",
                "retention_days": 90,
                "org_id": self.project.organization_id,
                "project_id": self.project.id,
                "duration": 60.0,
                "errors": 0,
                "started": self.session_started - 240,
                "received": self.received - 240,
            }
        )
        self.store_session(
            {
                "session_id": "5e910c1a-6941-460e-9843-24103fb6a63c",
                "distinct_id": "39887d89-13b2-4c84-8c23-5d13d2102669",
                "status": "exited",
                "seq": 1,
                "release": self.session_release,
                "environment": "prod",
                "retention_days": 90,
                "org_id": self.project.organization_id,
                "project_id": self.project.id,
                "duration": 30.0,
                "errors": 0,
                "started": self.session_started,
                "received": self.received,
            }
        )
        self.store_session(
            {
                "session_id": "a148c0c5-06a2-423b-8901-6b43b812cf82",
                "distinct_id": "39887d89-13b2-4c84-8c23-5d13d2102660",
                "status": "crashed",
                "seq": 0,
                "release": self.session_crashed_release,
                "environment": "prod",
                "retention_days": 90,
                "org_id": self.project.organization_id,
                "project_id": self.project.id,
                "duration": 60.0,
                "errors": 0,
                "started": self.session_started,
                "received": self.received,
            }
        )
        # Without the expand, no sessionCount key at all.
        result = serialize(
            [group],
            serializer=StreamGroupSerializerSnuba(
                stats_period="14d", organization_id=organization_id
            ),
            request=self.make_request(),
        )
        assert "sessionCount" not in result[0]
        result = serialize(
            [group],
            serializer=StreamGroupSerializerSnuba(
                stats_period="14d", expand=["sessions"], organization_id=organization_id
            ),
            request=self.make_request(),
        )
        assert result[0]["sessionCount"] == 3
        result = serialize(
            [group],
            serializer=StreamGroupSerializerSnuba(
                environment_ids=[environment.id],
                stats_period="14d",
                expand=["sessions"],
                organization_id=organization_id,
            ),
            request=self.make_request(),
        )
        assert result[0]["sessionCount"] == 2
        result = serialize(
            [group],
            serializer=StreamGroupSerializerSnuba(
                environment_ids=[no_sessions_environment.id],
                stats_period="14d",
                expand=["sessions"],
                organization_id=organization_id,
            ),
            request=self.make_request(),
        )
        assert result[0]["sessionCount"] is None
        result = serialize(
            [group],
            serializer=StreamGroupSerializerSnuba(
                environment_ids=[dev_environment.id],
                stats_period="14d",
                expand=["sessions"],
                organization_id=organization_id,
            ),
            request=self.make_request(),
        )
        assert result[0]["sessionCount"] == 1
        # A session ~18 days old is only counted when the explicit start/end
        # window covers it.
        self.store_session(
            {
                "session_id": "a148c0c5-06a2-423b-8901-6b43b812cf83",
                "distinct_id": "39887d89-13b2-4c84-8c23-5d13d2102627",
                "status": "ok",
                "seq": 0,
                "release": self.session_release,
                "environment": "dev",
                "retention_days": 90,
                "org_id": self.project.organization_id,
                "project_id": self.project.id,
                "duration": 60.0,
                "errors": 0,
                "started": self.session_started - 1590061,  # approximately 18 days
                "received": self.received - 1590061,  # approximately 18 days
            }
        )
        result = serialize(
            [group],
            serializer=StreamGroupSerializerSnuba(
                environment_ids=[dev_environment.id],
                stats_period="14d",
                expand=["sessions"],
                start=timezone.now() - timedelta(days=30),
                end=timezone.now() - timedelta(days=15),
                organization_id=organization_id,
            ),
            request=self.make_request(),
        )
        assert result[0]["sessionCount"] == 1
        # Delete the cache from the query we did above, else this result comes back as 1 instead of 0.5
        key_hash = hash_values([group.project.id, "", "", f"{dev_environment.id}"])
        cache.delete(f"w-s:{key_hash}")
        project2 = self.create_project(
            organization=self.organization, teams=[self.team], name="Another project"
        )
        data = {
            "fingerprint": ["meow"],
            "timestamp": timezone.now().isoformat(),
            "type": "error",
            "exception": [{"type": "Foo"}],
        }
        event = self.store_event(data=data, project_id=project2.id)
        self.store_event(data=data, project_id=project2.id)
        self.store_event(data=data, project_id=project2.id)
        result = serialize(
            [group, event.group],
            serializer=StreamGroupSerializerSnuba(
                environment_ids=[dev_environment.id],
                stats_period="14d",
                expand=["sessions"],
                organization_id=organization_id,
            ),
            request=self.make_request(),
        )
        assert result[0]["sessionCount"] == 2
        # No sessions in project2
        assert result[1]["sessionCount"] is None
    def test_skipped_date_timestamp_filters(self) -> None:
        """date/timestamp search filters must not become serializer conditions."""
        group = self.create_group()
        serializer = StreamGroupSerializerSnuba(
            search_filters=[
                SearchFilter(
                    SearchKey("timestamp"),
                    ">",
                    SearchValue(before_now(hours=1)),
                ),
                SearchFilter(
                    SearchKey("timestamp"),
                    "<",
                    SearchValue(before_now(seconds=1)),
                ),
                SearchFilter(
                    SearchKey("date"),
                    ">",
                    SearchValue(before_now(hours=1)),
                ),
                SearchFilter(
                    SearchKey("date"),
                    "<",
                    SearchValue(before_now(seconds=1)),
                ),
            ]
        )
        assert not serializer.conditions
        result = serialize(
            [group],
            self.user,
            serializer=serializer,
            request=self.make_request(),
        )
        assert result[0]["id"] == str(group.id)
| StreamGroupSerializerTestCase |
python | celery__celery | celery/canvas.py | {
"start": 54542,
"end": 55391
class ____(Signature):
    """Base signature for map-style tasks wrapping a ``(task, it)`` pair.

    The iterable is wrapped with :func:`regen` so it stays lazy until the
    signature is actually dispatched.
    """
    # Subclasses set the registered task name; the base has none.
    _task_name = None
    # Extracts (task, it) from the serialized kwargs dict.
    _unpack_args = itemgetter('task', 'it')
    @classmethod
    def from_dict(cls, d, app=None):
        """Rebuild the signature from its serialized dict form."""
        return cls(*cls._unpack_args(d['kwargs']), app=app, **d['options'])
    def __init__(self, task, it, **options):
        super().__init__(self._task_name, (),
                         {'task': task, 'it': regen(it)}, immutable=True, **options
                         )
    def apply_async(self, args=None, kwargs=None, **opts):
        # need to evaluate generators
        args = args if args else ()
        kwargs = kwargs if kwargs else {}
        task, it = self._unpack_args(self.kwargs)
        return self.type.apply_async(
            (), {'task': task, 'it': list(it)},
            route_name=task_name_from(self.kwargs.get('task')), **opts
        )
@Signature.register_type()
| _basemap |
python | huggingface__transformers | src/transformers/models/decision_transformer/modeling_decision_transformer.py | {
"start": 27887,
"end": 35871
class ____(DecisionTransformerPreTrainedModel):
    """
    The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
    setting. Refer to the paper for more details: https://huggingface.co/papers/2106.01345
    """
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        # note: the only difference between this GPT2Model and the default Huggingface version
        # is that the positional embeddings are removed (since we'll add those ourselves)
        self.encoder = DecisionTransformerGPT2Model(config)
        # Per-modality input projections into the shared hidden space.
        self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
        self.embed_return = torch.nn.Linear(1, config.hidden_size)
        self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
        self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
        self.embed_ln = nn.LayerNorm(config.hidden_size)
        # note: we don't predict states or returns for the paper
        self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
        self.predict_action = nn.Sequential(
            *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
        )
        self.predict_return = torch.nn.Linear(config.hidden_size, 1)
        # Initialize weights and apply final processing
        self.post_init()
    @auto_docstring
    def forward(
        self,
        states: Optional[torch.FloatTensor] = None,
        actions: Optional[torch.FloatTensor] = None,
        rewards: Optional[torch.FloatTensor] = None,
        returns_to_go: Optional[torch.FloatTensor] = None,
        timesteps: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple[torch.FloatTensor], DecisionTransformerOutput]:
        r"""
        states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
            The states for each step in the trajectory
        actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
            The actions taken by the "expert" policy for the current state, these are masked for auto regressive
            prediction
        rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
            The rewards for each state, action
        returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
            The returns for each state in the trajectory
        timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
            The timestep for each step in the trajectory
        Examples:
        ```python
        >>> from transformers import DecisionTransformerModel
        >>> import torch
        >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
        >>> # evaluation
        >>> model = model.to(device)
        >>> model.eval()
        >>> env = gym.make("Hopper-v3")
        >>> state_dim = env.observation_space.shape[0]
        >>> act_dim = env.action_space.shape[0]
        >>> state = env.reset()
        >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
        >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
        >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
        >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
        >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
        >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
        >>> # forward pass
        >>> with torch.no_grad():
        ...     state_preds, action_preds, return_preds = model(
        ...         states=states,
        ...         actions=actions,
        ...         rewards=rewards,
        ...         returns_to_go=target_return,
        ...         timesteps=timesteps,
        ...         attention_mask=attention_mask,
        ...         return_dict=False,
        ...     )
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, seq_length = states.shape[0], states.shape[1]
        if attention_mask is None:
            # attention mask for GPT: 1 if can be attended to, 0 if not
            # NOTE(review): this default mask is created without a device
            # argument (CPU) — presumably callers on GPU pass their own mask;
            # confirm before relying on the default in a CUDA setting.
            attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
        # embed each modality with a different head
        state_embeddings = self.embed_state(states)
        action_embeddings = self.embed_action(actions)
        returns_embeddings = self.embed_return(returns_to_go)
        time_embeddings = self.embed_timestep(timesteps)
        # time embeddings are treated similar to positional embeddings
        state_embeddings = state_embeddings + time_embeddings
        action_embeddings = action_embeddings + time_embeddings
        returns_embeddings = returns_embeddings + time_embeddings
        # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
        # which works nice in an autoregressive sense since states predict actions
        stacked_inputs = (
            torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
            .permute(0, 2, 1, 3)
            .reshape(batch_size, 3 * seq_length, self.hidden_size)
        )
        stacked_inputs = self.embed_ln(stacked_inputs)
        # to make the attention mask fit the stacked inputs, have to stack it as well
        stacked_attention_mask = (
            torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
            .permute(0, 2, 1)
            .reshape(batch_size, 3 * seq_length)
        )
        device = stacked_inputs.device
        # we feed in the input embeddings (not word indices as in NLP) to the model
        encoder_outputs = self.encoder(
            inputs_embeds=stacked_inputs,
            attention_mask=stacked_attention_mask,
            position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        x = encoder_outputs[0]
        # reshape x so that the second dimension corresponds to the original
        # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
        x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
        # get predictions
        return_preds = self.predict_return(x[:, 2])  # predict next return given state and action
        state_preds = self.predict_state(x[:, 2])  # predict next state given state and action
        action_preds = self.predict_action(x[:, 1])  # predict next action given state
        if not return_dict:
            return (state_preds, action_preds, return_preds)
        return DecisionTransformerOutput(
            last_hidden_state=encoder_outputs.last_hidden_state,
            state_preds=state_preds,
            action_preds=action_preds,
            return_preds=return_preds,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
__all__ = [
"DecisionTransformerGPT2Model",
"DecisionTransformerGPT2PreTrainedModel",
"DecisionTransformerModel",
"DecisionTransformerPreTrainedModel",
]
| DecisionTransformerModel |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_deleted_event.py | {
"start": 206,
"end": 492
class ____(BaseModel):
    """Server event emitted when an item is removed from the conversation."""
    event_id: str
    """The unique ID of the server event."""
    item_id: str
    """The ID of the item that was deleted."""
    type: Literal["conversation.item.deleted"]
    """The event type, must be `conversation.item.deleted`."""
| ConversationItemDeletedEvent |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/configs.py | {
"start": 1717,
"end": 3853
class ____(_StageConfigBase):
    """Stage configuration that uses only the shared base-stage fields."""
    pass
def resolve_stage_config(
    stage_cfg_value: Union[bool, Dict[str, Any], _StageConfigBase],
    stage_config_cls: Type[T],
    processor_defaults: Optional[Dict[str, Any]] = None,
) -> T:
    """Normalize a stage config given as bool, dict, or typed config object.

    Args:
        stage_cfg_value: The raw value. A bool toggles the stage on/off, a
            dict is parsed into ``stage_config_cls``, and an existing typed
            config is copied.
        stage_config_cls: The concrete StageConfig class to produce.
        processor_defaults: Processor-level fallbacks merged into any of
            'batch_size', 'concurrency', 'runtime_env', 'model_source' that
            the resolved config leaves unset.

    Returns:
        A new ``stage_config_cls`` instance with defaults applied.
    """
    defaults = processor_defaults or {}
    if isinstance(stage_cfg_value, stage_config_cls):
        # Copy via a dump/validate round-trip so the caller's object is untouched.
        resolved = stage_config_cls.model_validate(stage_cfg_value.model_dump())
    elif isinstance(stage_cfg_value, bool):
        resolved = stage_config_cls(enabled=stage_cfg_value)
    elif isinstance(stage_cfg_value, dict):
        resolved = stage_config_cls(**stage_cfg_value)
    else:
        raise TypeError(
            f"Unsupported type for stage config: {type(stage_cfg_value).__name__}. "
            f"Expected bool, dict, or {stage_config_cls.__name__} instance. "
            f"Got: {stage_cfg_value}"
        )
    # Apply processor-level defaults only to fields this config class actually
    # declares (e.g. model_source exists on some stages only) and left unset.
    for field_name in ("batch_size", "concurrency", "runtime_env", "model_source"):
        if not hasattr(resolved, field_name):
            continue
        if getattr(resolved, field_name, None) is None and field_name in defaults:
            setattr(resolved, field_name, defaults[field_name])
    return resolved
| PrepareImageStageConfig |
python | pytorch__pytorch | torch/_logging/_internal.py | {
"start": 44249,
"end": 51950
class ____(Generic[_P]):
    """A deferred string: wraps a callable and its arguments, invoking the
    callable only when the object is rendered via ``str()``."""

    def __init__(
        self, func: Callable[_P, str], *args: _P.args, **kwargs: _P.kwargs
    ) -> None:
        # Stash everything; no work happens until __str__ is called.
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __str__(self) -> str:
        # Build the string on demand from the captured call.
        fn = self.func
        return fn(*self.args, **self.kwargs)
# Logs the time it takes to do structured logging by frame/compile id
# key is always {frame_id}_{frame_compile_id}
structured_logging_overhead: dict[str, float] = defaultdict(float)
def add_structured_logging_overhead(time_spent: float) -> None:
    """Accumulate time spent doing structured logging for the current compile id.

    The bucket key is ``{frame_id}_{frame_compile_id}``; time logged outside a
    compile context is currently dropped (see TODO below).
    """
    global structured_logging_overhead
    key = None
    if (trace_id := torch._guards.CompileContext.current_trace_id()) is not None:
        frame_id = trace_id.compile_id.frame_id
        frame_compile_id = trace_id.compile_id.frame_compile_id
        # Why not trace_id.attempt, like structured logging?
        # We aggregate across all attempts because
        # a compilation metric is logged per successful attempt
        key = f"{frame_id}_{frame_compile_id}"
    # TODO: deal with structured logging that occurs outside of specific compile ids
    # It's hard to figure out where we would log that if we want it in compilation metrics
    # itself.
    if key is not None:
        key = str(key)
        structured_logging_overhead[key] += time_spent
def get_structured_logging_overhead() -> Optional[float]:
    """Return accumulated structured-logging time for the current compile id,
    or None when there is no compile context or nothing was recorded."""
    key = None
    if (trace_id := torch._guards.CompileContext.current_trace_id()) is not None:
        frame_id = trace_id.compile_id.frame_id
        frame_compile_id = trace_id.compile_id.frame_compile_id
        # Same key scheme as add_structured_logging_overhead.
        key = f"{frame_id}_{frame_compile_id}"
    if key is not None:
        return structured_logging_overhead.get(key)
    else:
        return None
def trace_structured_artifact(
    name: str,  # this will go in metadata
    encoding: str,
    payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None,
    compile_id: Optional[CompileId] = None,
) -> None:
    """Convenience wrapper over ``trace_structured`` for "artifact" events,
    packing *name* and *encoding* into the event metadata."""
    trace_structured(
        "artifact",
        metadata_fn=lambda: {
            "name": name,
            "encoding": encoding,
        },
        payload_fn=payload_fn,
        compile_id=compile_id,
    )
def trace_structured(
    name: str,
    # NB: metadata expected to be dict so adding more info is forward compatible
    # Tuple[str, int] is a special case for string interning
    metadata_fn: Callable[[], Union[dict[str, Any], tuple[str, int]]] = dict,
    *,
    payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None,
    suppress_context: bool = False,
    expect_trace_id: bool = True,  # Whether or not we expect to have a current trace id
    record_logging_overhead: bool = True,  # Whether or not to record the time spent on structured logging
    compile_id: Optional[CompileId] = None,  # Optional if unavailable in the trace
) -> None:
    """
    metadata is an arbitrary JSON compatible struct, but it's expected to not be
    too long (e.g., less than 1MB)

    payload is an arbitrary string, which can be arbitrarily long (but expected to have
    newlines so no lines are too long)
    """
    # These names are reserved for the record envelope assembled below; a
    # metadata entry using one of them would collide with the envelope fields.
    assert name not in [
        "rank",
        "compiled_autograd_id",
        "frame_id",
        "frame_compile_id",
        "attempt",
        "severity",
        "timestamp",
        "pathname",
        "thread",
    ]
    assert callable(metadata_fn), (
        f"metadata_fn should be callable, but got {type(metadata_fn)}"
    )
    assert callable(payload_fn), (
        f"payload_fn should be callable, but got {type(payload_fn)}"
    )
    # trace_log never propagates and is ALWAYS DEBUG, so also check that there
    # are handlers instead of checking the log level
    if trace_log.handlers:
        start_time = time.time_ns()
        record: dict[str, object] = {}
        # The event's own metadata lives under its name; envelope fields sit
        # alongside it at the top level of the record.
        record[name] = metadata_fn()
        if not suppress_context:
            # TODO: Actually, the rank probably should just be emitted once at
            # the top, and not repeatedly spammed in all the logs, since it
            # never changes and we assume no interleaving
            if dist.is_available() and dist.is_initialized():
                record["rank"] = dist.get_rank()

            # Prefer the live trace id; fall back to the explicit compile_id
            # argument when no compile context is active.
            trace_id = torch._guards.CompileContext.current_trace_id()
            if expect_trace_id and trace_id is None and compile_id is None:
                # Record the stack of the log call to better diagnose why we
                # don't have a frame id for it
                record["stack"] = torch._logging.structured.from_traceback(
                    CapturedTraceback.extract(skip=1).summary()
                )
            else:
                cid = trace_id.compile_id if trace_id else compile_id
                if cid is not None:
                    if cid.compiled_autograd_id is not None:
                        record["compiled_autograd_id"] = cid.compiled_autograd_id
                    if cid.frame_id is not None:
                        record["frame_id"] = cid.frame_id
                    if cid.frame_compile_id is not None:
                        record["frame_compile_id"] = cid.frame_compile_id
                if trace_id:
                    # attempt only exists on a live trace id, not on the
                    # caller-supplied compile_id fallback.
                    record["attempt"] = trace_id.attempt
        payload = payload_fn()
        if payload is not None:
            if not isinstance(payload, str):
                if isinstance(payload, list):
                    # special case to look better
                    payload = "[\n" + ",\n".join(json.dumps(i) for i in payload) + "\n]"
                else:

                    def json_default(obj):
                        # Sets aren't json serializable
                        if isinstance(obj, set):
                            return list(obj)
                        raise TypeError(
                            f"Object of type {type(obj)} is not JSON serializable"
                        )

                    # force newlines so we are unlikely to overflow line limit
                    payload = json.dumps(payload, default=json_default, indent=0)
            # MD5 as a content fingerprint, not a security hash
            # (usedforsecurity=False); lets consumers pair this metadata record
            # with its (separately emitted) payload.
            h = hashlib.md5(usedforsecurity=False)
            h.update(payload.encode("utf-8"))
            record["has_payload"] = h.hexdigest()
        trace_log.debug(
            "", extra={"metadata": record, "payload": payload}, stacklevel=2
        )
        log_trace_structured_event(name, record)

        if record_logging_overhead:
            # Convert to seconds from nanoseconds, add it to the frame compile total
            structured_logging_overhead_s = (time.time_ns() - start_time) / 1e9
            add_structured_logging_overhead(structured_logging_overhead_s)
def dtrace_structured(
    name: str,
    # NB: metadata expected to be dict so adding more info is forward compatible
    # Tuple[str, int] is a special case for string interning
    metadata_fn: Callable[[], Union[dict[str, Any], tuple[str, int]]] = dict,
    *,
    payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None,
    suppress_context: bool = False,
    expect_trace_id: bool = False,  # Whether or not we expect to have a current trace id
    record_logging_overhead: bool = True,  # Whether or not to record the time spent on structured logging
) -> None:
    """
    For logging more detailed information used for debugging. This may result in
    the program becoming slow.
    """
    # Guard clause: detailed tracing is opt-in; bail out cheaply when disabled.
    if not GET_DTRACE_STRUCTURED:
        return
    trace_structured(
        name,
        metadata_fn,
        payload_fn=payload_fn,
        suppress_context=suppress_context,
        expect_trace_id=expect_trace_id,
        record_logging_overhead=record_logging_overhead,
    )
import torch._guards
import torch._utils_internal
import torch.distributed as dist
| LazyString |
python | huggingface__transformers | src/transformers/models/cpm/tokenization_cpm.py | {
"start": 1084,
"end": 13862
class ____(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba-RS segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """
        Construct a CPM tokenizer. Based on [Jieba-RS](https://pypi.org/project/rjieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `True`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        """
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # rjieba is an optional dependency; fail at construction time with an
        # actionable message rather than at first tokenization.
        try:
            import rjieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install rjieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/rjieba/ for installation."
            )
        self.jieba = rjieba
        # Map space -> U+2582 and newline -> U+2583 so whitespace survives
        # SentencePiece; `_decode` below applies the inverse mapping.
        self.translator = str.maketrans(" \n", "\u2582\u2583")

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # Pad positions get segment id 3, distinct from the 0/1/2 ids produced
        # by create_token_type_ids_from_sequences (XLNet-style convention).
        self._pad_token_type_id = 3

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        # SentencePiece vocabulary plus any tokens added after loading.
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor (a native object) is dropped from the
        # pickled state and rebuilt from vocab_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        # Normalize whitespace, quotes, accents, and case per the constructor
        # flags before handing the text to SentencePiece.
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            # NFKD decomposition followed by stripping combining marks.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> list[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Split pieces like "9," so the trailing comma becomes its own
            # token and the digits are re-segmented by SentencePiece.
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the spurious leading word-boundary marker that
                    # re-encoding introduced.
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Trailing 1s mark the <sep>/<cls> positions appended at the end
        # (see build_inputs_with_special_tokens above).
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        # The final <cls> token gets its own segment id, 2.
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Copy the original .spm file when it exists on disk; otherwise dump
        # the in-memory serialized model.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        # Undo the space/newline placeholder mapping applied via
        # `self.translator` during pre-tokenization.
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text


__all__ = ["CpmTokenizer"]
| CpmTokenizer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.