language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 31399,
"end": 31989
} | class ____(__TestCase):
"""Builtin dict preserves insertion order.
Reuse some of tests in OrderedDict selectively.
"""
module = builtins
OrderedDict = dict
for method in (
"test_init test_update test_abc test_clear test_delitem " +
"test_setitem test_detect_deletion_during_iteration " +
"test_popitem test_reinsert test_override_update " +
"test_highly_nested test_highly_nested_subclass " +
"test_delitem_hash_collision ").split():
setattr(CPythonBuiltinDictTests, method, getattr(OrderedDictTests, method))
del method
| CPythonBuiltinDictTests |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-powerful-integers.py | {
"start": 1058,
"end": 1789
} | class ____(object):
def numberOfPowerfulInt(self, start, finish, limit, s):
"""
:type start: int
:type finish: int
:type limit: int
:type s: str
:rtype: int
"""
def count(x):
result = 0
str_x = str(x)
l = len(str_x)-len(s)
cnt = (limit+1)**l
for i in xrange(l):
cnt //= limit+1
result += (min(int(str_x[i])-1, limit)-0+1)*cnt
if int(str_x[i]) > limit:
break
else:
if int(str_x[-len(s):]) >= int(s):
result += 1
return result
return count(finish)-count(start-1)
| Solution2 |
python | scipy__scipy | scipy/sparse/linalg/tests/test_special_sparse_arrays.py | {
"start": 6954,
"end": 9517
} | class ____:
"""
Sakurai tests
"""
def test_specific_shape(self):
sak = Sakurai(6)
assert_array_equal(sak.toarray(), sak(np.eye(6)))
a = np.array(
[
[ 5, -4, 1, 0, 0, 0],
[-4, 6, -4, 1, 0, 0],
[ 1, -4, 6, -4, 1, 0],
[ 0, 1, -4, 6, -4, 1],
[ 0, 0, 1, -4, 6, -4],
[ 0, 0, 0, 1, -4, 5]
]
)
np.array_equal(a, sak.toarray())
np.array_equal(sak.tosparse().toarray(), sak.toarray())
ab = np.array(
[
[ 1, 1, 1, 1, 1, 1],
[-4, -4, -4, -4, -4, -4],
[ 5, 6, 6, 6, 6, 5]
]
)
np.array_equal(ab, sak.tobanded())
e = np.array(
[0.03922866, 0.56703972, 2.41789479, 5.97822974,
10.54287655, 14.45473055]
)
np.array_equal(e, sak.eigenvalues())
np.array_equal(e[:2], sak.eigenvalues(2))
# `Sakurai` default `dtype` is `np.int8` as its entries are small integers
@pytest.mark.parametrize('dtype', ALLDTYPES)
def test_linearoperator_shape_dtype(self, dtype):
n = 7
sak = Sakurai(n, dtype=dtype)
assert sak.shape == (n, n)
assert sak.dtype == dtype
assert_array_equal(sak.toarray(), Sakurai(n).toarray().astype(dtype))
assert_array_equal(sak.tosparse().toarray(),
Sakurai(n).tosparse().toarray().astype(dtype))
@pytest.mark.parametrize('dtype', ALLDTYPES)
@pytest.mark.parametrize('argument_dtype', ALLDTYPES)
def test_dot(self, dtype, argument_dtype):
""" Test the dot-product for type preservation and consistency.
"""
result_dtype = np.promote_types(argument_dtype, dtype)
n = 5
sak = Sakurai(n)
x0 = np.arange(n)
x1 = x0.reshape((-1, 1))
x2 = np.arange(2 * n).reshape((n, 2))
input_set = [x0, x1, x2]
for x in input_set:
y = sak.dot(x.astype(argument_dtype))
assert x.shape == y.shape
assert np.can_cast(y.dtype, result_dtype)
if x.ndim == 2:
ya = sak.toarray() @ x.astype(argument_dtype)
np.array_equal(y, ya)
assert np.can_cast(ya.dtype, result_dtype)
ys = sak.tosparse() @ x.astype(argument_dtype)
np.array_equal(y, ys)
assert np.can_cast(ys.dtype, result_dtype)
| TestSakurai |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_dot_address.py | {
"start": 1891,
"end": 4688
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid Polkadot addresses."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"12gX42C4Fj1wgtfgoP624zeHrcPBqzhb4yAENyvFdGX6EUnN",
"15thjfpZX1xVcsnfya1oXmbXzau2on5abAc4XXYP62SNwwQQ",
"12771k5UXewvK7FXd1RpPHxvFiCG4GQCrxRmXWN5tAAwDQoi",
"13dCwieVYyuLVRdDcxomFeaYU1C73QpNDJreqHvKcggikWjK",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_dot_address"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["coinaddrvalidator"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidDotAddress().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidDotAddress |
python | readthedocs__readthedocs.org | readthedocs/builds/models.py | {
"start": 34897,
"end": 35722
} | class ____:
"""
Mixin for common command result methods/properties.
Shared methods between the database model :py:class:`BuildCommandResult` and
non-model representations of build command results from the API
"""
@property
def successful(self):
"""Did the command exit with a successful exit code."""
return self.exit_code == 0
@property
def failed(self):
"""
Did the command exit with a failing exit code.
Helper for inverse of :py:meth:`successful`
"""
return not self.successful
@property
def finished(self):
"""
Check if the command has finished running.
This is determined by checking if the `end_time` is not None.
"""
return self.end_time is not None
| BuildCommandResultMixin |
python | python-openxml__python-docx | src/docx/opc/rel.py | {
"start": 223,
"end": 4469
} | class ____(Dict[str, "_Relationship"]):
"""Collection object for |_Relationship| instances, having list semantics."""
def __init__(self, baseURI: str):
super(Relationships, self).__init__()
self._baseURI = baseURI
self._target_parts_by_rId: dict[str, Any] = {}
def add_relationship(
self, reltype: str, target: Part | str, rId: str, is_external: bool = False
) -> "_Relationship":
"""Return a newly added |_Relationship| instance."""
rel = _Relationship(rId, reltype, target, self._baseURI, is_external)
self[rId] = rel
if not is_external:
self._target_parts_by_rId[rId] = target
return rel
def get_or_add(self, reltype: str, target_part: Part) -> _Relationship:
"""Return relationship of `reltype` to `target_part`, newly added if not already
present in collection."""
rel = self._get_matching(reltype, target_part)
if rel is None:
rId = self._next_rId
rel = self.add_relationship(reltype, target_part, rId)
return rel
def get_or_add_ext_rel(self, reltype: str, target_ref: str) -> str:
"""Return rId of external relationship of `reltype` to `target_ref`, newly added
if not already present in collection."""
rel = self._get_matching(reltype, target_ref, is_external=True)
if rel is None:
rId = self._next_rId
rel = self.add_relationship(reltype, target_ref, rId, is_external=True)
return rel.rId
def part_with_reltype(self, reltype: str) -> Part:
"""Return target part of rel with matching `reltype`, raising |KeyError| if not
found and |ValueError| if more than one matching relationship is found."""
rel = self._get_rel_of_type(reltype)
return rel.target_part
@property
def related_parts(self):
"""Dict mapping rIds to target parts for all the internal relationships in the
collection."""
return self._target_parts_by_rId
@property
def xml(self) -> str:
"""Serialize this relationship collection into XML suitable for storage as a
.rels file in an OPC package."""
rels_elm = CT_Relationships.new()
for rel in self.values():
rels_elm.add_rel(rel.rId, rel.reltype, rel.target_ref, rel.is_external)
return rels_elm.xml
def _get_matching(
self, reltype: str, target: Part | str, is_external: bool = False
) -> _Relationship | None:
"""Return relationship of matching `reltype`, `target`, and `is_external` from
collection, or None if not found."""
def matches(rel: _Relationship, reltype: str, target: Part | str, is_external: bool):
if rel.reltype != reltype:
return False
if rel.is_external != is_external:
return False
rel_target = rel.target_ref if rel.is_external else rel.target_part
return rel_target == target
for rel in self.values():
if matches(rel, reltype, target, is_external):
return rel
return None
def _get_rel_of_type(self, reltype: str):
"""Return single relationship of type `reltype` from the collection.
Raises |KeyError| if no matching relationship is found. Raises |ValueError| if
more than one matching relationship is found.
"""
matching = [rel for rel in self.values() if rel.reltype == reltype]
if len(matching) == 0:
tmpl = "no relationship of type '%s' in collection"
raise KeyError(tmpl % reltype)
if len(matching) > 1:
tmpl = "multiple relationships of type '%s' in collection"
raise ValueError(tmpl % reltype)
return matching[0]
@property
def _next_rId(self) -> str: # pyright: ignore[reportReturnType]
"""Next available rId in collection, starting from 'rId1' and making use of any
gaps in numbering, e.g. 'rId2' for rIds ['rId1', 'rId3']."""
for n in range(1, len(self) + 2):
rId_candidate = "rId%d" % n # like 'rId19'
if rId_candidate not in self:
return rId_candidate
| Relationships |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_lm1b.py | {
"start": 10907,
"end": 12578
} | class ____(nn.Module):
"""Transformer pure decoder stack for language modelling.
Args:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
def __call__(self,
inputs,
inputs_positions=None,
inputs_segmentation=None):
"""Applies TransformerLM on the inputs.
Args:
inputs: target data.
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
Returns:
logits array from transformer decoder.
"""
config = self.config
# Make padding attention masks.
if config.decode:
# for fast autoregressive decoding we use no decoder mask
decoder_mask = None
else:
decoder_mask = nn.combine_masks(
nn.make_attention_mask(inputs > 0, inputs > 0, dtype=config.dtype),
nn.make_causal_mask(inputs, dtype=config.dtype))
# Add segmentation block-diagonal attention masks if using segmented data.
if inputs_segmentation is not None:
decoder_mask = nn.combine_masks(
decoder_mask,
nn.make_attention_mask(
inputs_segmentation,
inputs_segmentation,
jnp.equal,
dtype=config.dtype))
logits = Decoder(
config=config, shared_embedding=None, name='decoder')(
inputs,
inputs_positions=inputs_positions,
inputs_segmentation=inputs_segmentation,
decoder_mask=decoder_mask,
encoder_decoder_mask=None)
return logits.astype(self.config.dtype)
| TransformerLM |
python | gevent__gevent | src/greentest/3.10/test_asyncore.py | {
"start": 9721,
"end": 9876
} | class ____(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
| dispatcherwithsend_noread |
python | doocs__leetcode | solution/0600-0699/0685.Redundant Connection II/Solution.py | {
"start": 0,
"end": 889
} | class ____:
def findRedundantDirectedConnection(self, edges: List[List[int]]) -> List[int]:
def find(x: int) -> int:
if p[x] != x:
p[x] = find(p[x])
return p[x]
n = len(edges)
ind = [0] * n
for _, v in edges:
ind[v - 1] += 1
dup = [i for i, (_, v) in enumerate(edges) if ind[v - 1] == 2]
p = list(range(n))
if dup:
for i, (u, v) in enumerate(edges):
if i == dup[1]:
continue
pu, pv = find(u - 1), find(v - 1)
if pu == pv:
return edges[dup[0]]
p[pu] = pv
return edges[dup[1]]
for i, (u, v) in enumerate(edges):
pu, pv = find(u - 1), find(v - 1)
if pu == pv:
return edges[i]
p[pu] = pv
| Solution |
python | wntrblm__nox | nox/_version.py | {
"start": 1057,
"end": 3835
} | class ____(Exception):
"""The ``nox.needs_version`` specifier cannot be parsed."""
def get_nox_version() -> str:
"""Return the version of the installed Nox package."""
return metadata.version("nox")
def _parse_string_constant(node: ast.AST) -> str | None: # pragma: no cover
"""Return the value of a string constant."""
if isinstance(node, ast.Constant) and isinstance(node.value, str):
return node.value
return None
def _parse_needs_version(source: str, filename: str = "<unknown>") -> str | None:
"""Parse ``nox.needs_version`` from the user's Noxfile."""
value: str | None = None
module: ast.Module = ast.parse(source, filename=filename)
for statement in module.body:
if isinstance(statement, ast.Assign):
for target in statement.targets:
if (
isinstance(target, ast.Attribute)
and isinstance(target.value, ast.Name)
and target.value.id == "nox"
and target.attr == "needs_version"
):
value = _parse_string_constant(statement.value)
return value
def _read_needs_version(filename: str) -> str | None:
"""Read ``nox.needs_version`` from the user's Noxfile."""
with open(filename, encoding="utf-8") as io:
source = io.read()
return _parse_needs_version(source, filename=filename)
def _check_nox_version_satisfies(needs_version: str) -> None:
"""Check if the Nox version satisfies the given specifiers."""
version = Version(get_nox_version())
try:
specifiers = SpecifierSet(needs_version)
except InvalidSpecifier as error:
message = f"Cannot parse `nox.needs_version`: {error}"
with contextlib.suppress(InvalidVersion):
Version(needs_version)
message += f", did you mean '>= {needs_version}'?"
raise InvalidVersionSpecifier(message) from error
if not specifiers.contains(version, prereleases=True):
msg = f"The Noxfile requires Nox {specifiers}, you have {version}"
raise VersionCheckFailed(msg)
def check_nox_version(filename: str) -> None:
"""Check if ``nox.needs_version`` in the user's Noxfile is satisfied.
Args:
filename: The location of the user's Noxfile. ``nox.needs_version`` is
read from the Noxfile by parsing the AST.
Raises:
VersionCheckFailed: The Nox version does not satisfy what
``nox.needs_version`` specifies.
InvalidVersionSpecifier: The ``nox.needs_version`` specifier cannot be
parsed.
"""
needs_version = _read_needs_version(filename)
if needs_version is not None:
_check_nox_version_satisfies(needs_version)
| InvalidVersionSpecifier |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 25208,
"end": 25321
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Uos'
strategy_class = FileStrategy
| UosHostname |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 93257,
"end": 95884
} | class ____(GoogleCloudBaseOperator):
"""
Base class for all Dataplex Catalog operators.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"location",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
location: str,
gcp_conn_id: str = "google_cloud_default",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.retry = retry
self.timeout = timeout
self.metadata = metadata
@cached_property
def hook(self) -> DataplexHook:
return DataplexHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location": self.location,
"project_id": self.project_id,
}
| DataplexCatalogBaseOperator |
python | pytorch__pytorch | test/distributed/tensor/debug/test_debug_mode.py | {
"start": 1057,
"end": 24819
} | class ____(TestCase):
def tearDown(self):
super().tearDown()
dist.destroy_process_group()
def setUp(self):
super().setUp()
self.world_size = 8
store = FakeStore()
dist.init_process_group(
backend="fake", rank=0, world_size=self.world_size, store=store
)
self.device_type = "cuda"
def test_debug_mode_mm(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
x = torch.randn(1, 8, requires_grad=False)
y = torch.randn(1, 32, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(0)], run_check=False)
with DebugMode(
record_torchfunction=True, record_ids=True, record_output=True
) as debug_mode:
torch.mm(x_dtensor, y_dtensor).sum()
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
torch.mm(dt$0: f32[8, 8]| S(0), dt$1: f32[8, 32]| S(0)) -> dt$6: f32[8, 32]| S(0)
aten::mm(dt$0: f32[8, 8]| S(0), dt$1: f32[8, 32]| S(0))
redistribute_input(1, S(0) -> R)
redistribute_input(t$2: f32[1, 32], trace: S(0)->R)
_c10d_functional::all_gather_into_tensor(t$2: f32[1, 32], 8, 0) -> t$3: f32[8, 32]
_c10d_functional::wait_tensor(t$3: f32[8, 32]) -> t$3: f32[8, 32]
aten::mm(t$4: f32[1, 8], t$3: f32[8, 32]) -> t$5: f32[1, 32]
<method 'sum' of 'torch._C.TensorBase' objects>(dt$6: f32[8, 32]| S(0)) -> dt$8: f32[]| P(sum)
aten::sum(dt$6: f32[8, 32]| S(0))
aten::sum(t$5: f32[1, 32]) -> t$7: f32[]""",
)
self.assertTrue(isinstance(debug_mode.operators[0], _OpCall))
self.assertTrue(isinstance(debug_mode.operators[2], _RedistributeCall))
self.assertEqual(next(iter(debug_mode.operators[1])), torch.ops.aten.mm.default)
# check stringification
self.assertTrue(hasattr(debug_mode.operators[0], "args_str"))
self.assertFalse(hasattr(debug_mode.operators[0], "args"))
# check recording hook
def mm(x, y):
return (x @ y).sum()
eager_out = mm(x_dtensor, y_dtensor)
# check recording hook for compiled variant
with (
DebugMode() as debug_mode,
DebugMode.record_outputs(),
DebugMode.log_tensor_hashes(),
):
compiled_out = torch.compile(mm, backend="aot_eager")(x_dtensor, y_dtensor)
# check numerical equivalence
self.assertTrue(torch.equal(eager_out, compiled_out))
sum_op = next(
iter(
op
for op in debug_mode.operators
if isinstance(op, _OpCall) and str(op.op) == "aten.sum.default"
)
)
self.assertTrue(torch.equal(sum_op.record["output"], eager_out.to_local()))
self.assertTrue(
"aten::sum(t: f32[1, 32]) # {'hash': " in debug_mode.debug_string()
)
# check tuple hash functions
with (
DebugMode() as debug_mode,
DebugMode.log_tensor_hashes(hash_fn=["norm", "hash_tensor"]),
):
mm(x_dtensor, y_dtensor)
output_hash = debug_mode.operators[-1].log["hash"]
norm_ = lambda x: norm_hash_fn(x, use_scalar=True) # noqa: E731
hash_ = lambda x: hash_tensor_fn(x, use_scalar=True) # noqa: E731
self.assertEqual(output_hash[0], norm_(eager_out))
self.assertEqual(output_hash[1], hash_(eager_out))
# some edge cases
self.assertEqual(norm_(torch.tensor(torch.nan)), torch.nan)
self.assertEqual(norm_(torch.tensor(torch.inf)), torch.inf)
self.assertEqual(norm_(torch.complex(torch.ones(4), torch.zeros(4))), 4)
self.assertEqual(hash_(torch.ones(4, dtype=torch.float8_e5m2)), 0)
self.assertEqual(hash_(torch.ones(4, dtype=torch.int8)), 0)
self.assertEqual(hash_(torch.ones(5, dtype=torch.int8)), 1)
def test_debug_string_inside_context(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
x = torch.randn(1, 8, requires_grad=False)
y = torch.randn(1, 32, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(0)], run_check=False)
with DebugMode() as debug_mode:
torch.mm(x_dtensor, y_dtensor).sum()
s0 = debug_mode.debug_string()
s1 = debug_mode.debug_string()
self.assertEqual(s0, s1)
def test_debug_mode_backward(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
x = torch.randn(1, 8, requires_grad=True)
y = torch.randn(8, 1, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(1)], run_check=False)
with DebugMode(
record_torchfunction=True, record_stack_trace=True
) as debug_mode:
z = x_dtensor + y_dtensor
z.sum().backward()
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
<method 'add' of 'torch._C.TensorBase' objects>(dt: f32[8, 8]| S(0), dt: f32[8, 8]| S(1))
aten::add.Tensor(dt: f32[8, 8]| S(0), dt: f32[8, 8]| S(1))
redistribute_input(1, S(1) -> S(0))
redistribute_input(t: f32[8, 1], trace: S(1)->S(0))
_dtensor::shard_dim_alltoall(t: f32[8, 1], 1, 0, 0)
aten::add.Tensor(t: f32[1, 8], t: f32[1, 8])
<method 'sum' of 'torch._C.TensorBase' objects>(dt: f32[8, 8]| S(0))
aten::sum(dt: f32[8, 8]| S(0))
aten::sum(t: f32[1, 8])
torch._tensor.backward(dt: f32[]| P(sum), gradient=None, retain_graph=None, create_graph=False, inputs=None)
aten::ones_like(dt: f32[]| P(sum), pin_memory=False, memory_format=torch.preserve_format)
aten::ones_like(t: f32[], pin_memory=False, memory_format=torch.preserve_format)
aten::expand(dt: f32[]| R, [8, 8])
aten::expand(t: f32[], [8, 8])
redistribute_input(t: f32[8, 8], trace: R->S(1))
aten::split.Tensor(t: f32[8, 8], 1, 1)
aten::clone(t: f32[8, 1])
aten::_to_copy(t: f32[8, 1], dtype=torch.float32, layout=torch.strided, device=cpu)
redistribute_input(t: f32[8, 8], trace: R->S(0))
aten::split.Tensor(t: f32[8, 8], 1)
aten::clone(t: f32[1, 8])
aten::detach(t: f32[8, 1])
aten::_to_copy(t: f32[1, 8], dtype=torch.float32, layout=torch.strided, device=cpu)
aten::detach(t: f32[1, 8])""",
)
# check stack trace
self.assertTrue("z.sum().backward()" in debug_mode.operators[-1].stack_trace)
def test_debug_mode_densor_redistribution_trace(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size).view(4, 2))
x = torch.randn(16, 8, requires_grad=True)
y = torch.randn(8, 16, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0), Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(1), Shard(1)], run_check=False)
x_dtensor._spec.shard_order = (ShardOrderEntry(tensor_dim=0, mesh_dims=(0, 1)),)
y_dtensor._spec.shard_order = (ShardOrderEntry(tensor_dim=1, mesh_dims=(0, 1)),)
with DebugMode(record_torchfunction=False) as debug_mode:
torch.mm(x_dtensor, y_dtensor).sum()
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
aten::mm(dt: f32[128, 8]| S(0)[0]S(0)[1], dt: f32[8, 128]| S(1)[0]S(1)[1])
redistribute_input(0, S(0)[0]S(0)[1] -> S(0)R)
redistribute_input(t: f32[16, 8], trace: S(0)[0]S(0)[1]->S(0)R)
_c10d_functional::all_gather_into_tensor(t: f32[16, 8], 2, 3)
_c10d_functional::wait_tensor(t: f32[32, 8])
redistribute_input(1, S(1)[0]S(1)[1] -> RS(1))
redistribute_input(t: f32[8, 16], trace: S(1)[0]S(1)[1]->S(1)R->RR->RS(1))
_c10d_functional::all_gather_into_tensor(t: f32[8, 16], 2, 3)
_c10d_functional::wait_tensor(t: f32[16, 16])
aten::chunk(t: f32[16, 16], 2)
aten::cat(['t: f32[8, 16]', 't: f32[8, 16]'], 1)
_c10d_functional::all_gather_into_tensor(t: f32[8, 32], 4, 1)
_c10d_functional::wait_tensor(t: f32[32, 32])
aten::chunk(t: f32[32, 32], 4)
aten::cat(['t: f32[8, 32]', 't: f32[8, 32]', 't: f32[8, 32]', 't: f32[8, 32]'], 1)
aten::chunk(t: f32[8, 128], 2, 1)
aten::clone(t: f32[8, 64])
aten::mm(t: f32[32, 8], t: f32[8, 64])
aten::sum(dt: f32[128, 128]| S(0)S(1))
aten::sum(t: f32[32, 64])""",
)
def test_debug_mode_einsum(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size).view(4, 2))
# Create test tensors
a = torch.randn(16, 6, 8)
b = torch.randn(8, 4, 4)
a_dt = DTensor.from_local(a, mesh, [Partial(), Replicate()], run_check=False)
b_dt = DTensor.from_local(b, mesh, [Replicate(), Partial()], run_check=False)
# Capture the operator decomposition
with DebugMode(record_torchfunction=True) as debug_mode:
torch.einsum("bld,dnh->blnh", a_dt, b_dt)
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
torch.functional.einsum(bld,dnh->blnh, dt: f32[16, 6, 8]| P(sum)R, dt: f32[8, 4, 4]| RP(sum))
aten::unsqueeze(dt: f32[16, 6, 8]| P(sum)R, 3)
aten::unsqueeze(t: f32[16, 6, 8], 3)
aten::unsqueeze(dt: f32[16, 6, 8, 1]| P(sum)R, 4)
aten::unsqueeze(t: f32[16, 6, 8, 1], 4)
aten::permute(dt: f32[16, 6, 8, 1, 1]| P(sum)R, [0, 1, 3, 4, 2])
aten::permute(t: f32[16, 6, 8, 1, 1], [0, 1, 3, 4, 2])
aten::unsqueeze(dt: f32[8, 4, 4]| RP(sum), 3)
aten::unsqueeze(t: f32[8, 4, 4], 3)
aten::unsqueeze(dt: f32[8, 4, 4, 1]| RP(sum), 4)
aten::unsqueeze(t: f32[8, 4, 4, 1], 4)
aten::permute(dt: f32[8, 4, 4, 1, 1]| RP(sum), [3, 4, 1, 2, 0])
aten::permute(t: f32[8, 4, 4, 1, 1], [3, 4, 1, 2, 0])
aten::permute(dt: f32[16, 6, 1, 1, 8]| P(sum)R, [0, 1, 4, 2, 3])
aten::permute(t: f32[16, 6, 1, 1, 8], [0, 1, 4, 2, 3])
aten::view(dt: f32[16, 6, 8, 1, 1]| P(sum)R, [1, 96, 8])
aten::view(t: f32[16, 6, 8, 1, 1], [1, 96, 8])
aten::permute(dt: f32[1, 1, 4, 4, 8]| RP(sum), [4, 2, 3, 0, 1])
aten::permute(t: f32[1, 1, 4, 4, 8], [4, 2, 3, 0, 1])
aten::view(dt: f32[8, 4, 4, 1, 1]| RP(sum), [1, 8, 16])
aten::view(t: f32[8, 4, 4, 1, 1], [1, 8, 16])
aten::bmm(dt: f32[1, 96, 8]| P(sum)R, dt: f32[1, 8, 16]| RP(sum))
redistribute_input(0, P(sum)R -> S(2)[0]S(2)[1])
redistribute_input(t: f32[1, 96, 8], trace: P(sum)R->S(2)R->S(2)[0]S(2)[1])
aten::chunk(t: f32[1, 96, 8], 4, 2)
aten::cat(['t: f32[1, 96, 2]', 't: f32[1, 96, 2]', 't: f32[1, 96, 2]', 't: f32[1, 96, 2]'])
_c10d_functional::reduce_scatter_tensor(t: f32[4, 96, 2], sum, 4, 1)
_c10d_functional::wait_tensor(t: f32[1, 96, 2])
aten::chunk(t: f32[1, 96, 2], 2, 2)
aten::clone(t: f32[1, 96, 1])
redistribute_input(1, RP(sum) -> S(1)[0]S(1)[1])
redistribute_input(t: f32[1, 8, 16], trace: RP(sum)->S(1)P(sum)->S(1)[0]S(1)[1])
aten::chunk(t: f32[1, 8, 16], 4, 1)
aten::clone(t: f32[1, 2, 16])
aten::chunk(t: f32[1, 2, 16], 2, 1)
aten::cat(['t: f32[1, 1, 16]', 't: f32[1, 1, 16]'])
_c10d_functional::reduce_scatter_tensor(t: f32[2, 1, 16], sum, 2, 3)
_c10d_functional::wait_tensor(t: f32[1, 1, 16])
aten::bmm(t: f32[1, 96, 1], t: f32[1, 1, 16])
aten::view(dt: f32[1, 96, 16]| P(sum)P(sum), [16, 6, 1, 4, 4])
aten::view(t: f32[1, 96, 16], [16, 6, 1, 4, 4])
aten::permute(dt: f32[16, 6, 1, 4, 4]| P(sum)P(sum), [0, 1, 3, 4, 2])
aten::permute(t: f32[16, 6, 1, 4, 4], [0, 1, 3, 4, 2])
aten::view(dt: f32[16, 6, 4, 4, 1]| P(sum)P(sum), [16, 6, 4, 4])
aten::view(t: f32[16, 6, 4, 4, 1], [16, 6, 4, 4])""",
)
def test_real_tensor(self):
x = torch.randn(8, 8, 8)
linear = torch.nn.Linear(8, 8)
with DebugMode(record_torchfunction=True) as debug_mode:
linear(x).sum()
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
torch._C._nn.linear(t: f32[8, 8, 8], t: f32[8, 8], t: f32[8])
aten::view(t: f32[8, 8, 8], [64, 8])
aten::t(t: f32[8, 8])
aten::addmm(t: f32[8], t: f32[64, 8], t: f32[8, 8])
aten::view(t: f32[64, 8], [8, 8, 8])
<method 'sum' of 'torch._C.TensorBase' objects>(t: f32[8, 8, 8])
aten::sum(t: f32[8, 8, 8])""",
)
def test_fake_tensor(self):
with FakeTensorMode():
x = torch.randn(8, 8)
y = torch.randn(8, 8, 8)
with DebugMode(record_torchfunction=True, record_faketensor=True) as debug_mode:
torch.matmul(y, x)
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
torch.matmul(ft: f32[8, 8, 8], ft: f32[8, 8])
aten::view(ft: f32[8, 8, 8], [64, 8])
aten::mm(ft: f32[64, 8], ft: f32[8, 8])
aten::_unsafe_view(ft: f32[64, 8], [8, 8, 8])""",
)
def test_tensor_attributes(self):
x = torch.randn(8, 8)
x.a1 = "x1"
x.a2 = "x2"
y = torch.randn(8, 8, 8)
y.a1 = "y"
with DebugMode(
record_torchfunction=True,
record_faketensor=True,
record_tensor_attributes=["a1", "a2"],
store_original_args=True,
) as debug_mode:
torch.matmul(y, x)
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
torch.matmul(t: f32[8, 8, 8]{a1=y}, t: f32[8, 8]{a1=x1, a2=x2})
aten::view(t: f32[8, 8, 8]{a1=y}, [64, 8])
aten::mm(t: f32[64, 8], t: f32[8, 8]{a1=x1, a2=x2})
aten::_unsafe_view(t: f32[64, 8], [8, 8, 8])""",
)
self.assertTrue(hasattr(debug_mode.operators[0], "args"))
self.assertEqual(id(debug_mode.operators[0].args[0]), id(y))
@parametrize("has_inner_mode", [True, False])
@parametrize("has_outer_mode", [True, False])
def test_nested_debug_mode(self, has_inner_mode, has_outer_mode):
class DummyTorchDispatchMode1(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
return func(*args, **kwargs)
class DummyTorchDispatchMode2(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
return func(*args, **kwargs)
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
x = torch.randn(1, 8, requires_grad=True)
y = torch.randn(1, 32, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(0)], run_check=False)
inner_mode = (
DummyTorchDispatchMode1() if has_inner_mode else contextlib.nullcontext()
)
outer_mode = (
DummyTorchDispatchMode2() if has_outer_mode else contextlib.nullcontext()
)
with outer_mode:
with DebugMode() as debug_mode:
with inner_mode:
torch.mm(x_dtensor, y_dtensor)
self.assertTrue("redistribute_input(1, S(0) -> R)" in debug_mode.debug_string())
def test_debug_mode_higher_order_cond(self):
"""Test DebugMode with higher order operation."""
x = torch.randn(1, 8, requires_grad=True)
with DebugMode(record_torchfunction=True) as debug_mode:
# rewrite torch.conda as torch.ops.higher_order.cond to avoid compilation
torch.ops.higher_order.cond(
torch.tensor(True), lambda x: x + 1, lambda x: x - 1, (x,)
)
# Verify that cond operations are captured in debug mode
self.assertIn("torch.ops.higher_order.cond", debug_mode.debug_string())
def test_compile(self):
cnt = CompileCounterWithBackend("inductor")
@torch.compile(backend=cnt)
def f(x):
return x.sin().cos()
x = torch.randn(8)
f(x)
with DebugMode() as debug_mode:
f(x)
self.assertEqual(len(debug_mode.debug_string()), 0)
f(x)
f(x)
self.assertEqual(
cnt.frame_count, 1
) # check DebugMode doesn't trigger additional recompilations
def test_nn_module(self):
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(4, 4)
self.l2 = torch.nn.Linear(4, 4)
def forward(self, x):
return self.l2(self.l1(x))
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.abc = Foo()
self.xyz = torch.nn.Linear(4, 4)
def forward(self, x):
return self.xyz(self.abc(x))
mod = Bar()
inp = torch.randn(4, 4)
with DebugMode(record_nn_module=True) as debug_mode:
_ = mod(inp)
self.assertExpectedInline(
debug_mode.debug_string(),
"""\
[nn.Mod] Bar
[nn.Mod] Bar.abc
[nn.Mod] Bar.abc.l1
aten::t(t: f32[4, 4])
aten::addmm(t: f32[4], t: f32[4, 4], t: f32[4, 4])
[nn.Mod] Bar.abc.l2
aten::t(t: f32[4, 4])
aten::addmm(t: f32[4], t: f32[4, 4], t: f32[4, 4])
[nn.Mod] Bar.xyz
aten::t(t: f32[4, 4])
aten::addmm(t: f32[4], t: f32[4, 4], t: f32[4, 4])""",
)
with DebugMode(record_stack_trace=True) as debug_mode:
out = mod(inp).sum()
out.backward()
sum_op = [
op for op in debug_mode.operators if str(op.op) == "aten.sum.dim_IntList"
][-1]
self.assertTrue("self.l2(self.l1(x))" in sum_op.fwd_stack_trace)
self.assertTrue(
"self.l2(self.l1(x))" in debug_mode.debug_string(show_stack_trace=True)
)
@unittest.skipIf(not HAS_GPU, "requires GPU")
@unittest.skipIf(not has_triton_package(), "requires triton")
def test_triton_kernel_logs(self):
import triton
from torch.testing._internal.triton_utils import add_kernel_autotuned
def call_triton(x, y):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) # noqa: E731
add_kernel_autotuned[grid](x, y, output, n_elements)
return output
x = torch.randn(128, device=GPU_TYPE)
y = torch.randn(128, device=GPU_TYPE)
with DebugMode() as debug_mode:
torch.compile(call_triton)(x, y)
triton_calls = [
op for op in debug_mode.operators if isinstance(op, _TritonKernelCall)
]
self.assertGreater(len(triton_calls), 0)
self.assertIn("[triton]", triton_calls[0].render([]))
def test_check_hash_mismatches(self):
x = torch.randn(64, 64, device=GPU_TYPE)
x_different = torch.randn(64, 64, device=GPU_TYPE)
# Identical runs should have no mismatches
with DebugMode() as dm1, DebugMode.log_tensor_hashes():
x.sin().sum()
with DebugMode() as dm2, DebugMode.log_tensor_hashes():
x.sin().sum()
mismatches = DebugMode.check_hash_mismatches(dm1.logs, dm2.logs)
self.assertEqual(len(mismatches), 0)
# Different inputs should produce hash mismatches
with DebugMode() as dm3, DebugMode.log_tensor_hashes():
x_different.sin().sum()
# Check that mismatches are detected
mismatches = DebugMode.check_hash_mismatches(dm1.logs, dm3.logs)
self.assertEqual(len(mismatches), 2)
self.assertEqual(
[call["call"] for call in mismatches], ["aten::sin", "aten::sum"]
)
@unittest.skipIf(not HAS_GPU, "requires GPU")
@unittest.skipIf(not has_triton_package(), "requires triton")
def test_check_triton_hash_mismatches(self):
import triton
from torch.testing._internal.triton_utils import add_kernel_autotuned
def call_triton(x, y):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) # noqa: E731
add_kernel_autotuned[grid](x, y, output, n_elements)
return output
a = torch.randn(128, device=GPU_TYPE)
b = torch.randn(128, device=GPU_TYPE)
c = torch.randn(128, device=GPU_TYPE)
# Run with hash logging to verify triton kernels can be hashed
with DebugMode() as dm_t1, DebugMode.log_tensor_hashes(hash_inputs=True):
torch.compile(call_triton)(a, b)
# Different inputs should have different hashes in triton kernels
with DebugMode() as dm_t2, DebugMode.log_tensor_hashes(hash_inputs=True):
torch.compile(call_triton)(a, c)
# Compare triton kernel hashes
mismatches = DebugMode.check_hash_mismatches(
dm_t1.logs, dm_t2.logs, compare_inputs=True
)
triton_mismatches = [m for m in mismatches if m["call_type"] == "triton kernel"]
self.assertGreater(len(triton_mismatches), 0)
# check both input & output hash mismatches are detected
self.assertGreater(len([m for m in triton_mismatches if m["is_input_hash"]]), 0)
self.assertGreater(
len([m for m in triton_mismatches if not m["is_input_hash"]]), 0
)
def test_check_structure_mismatches(self):
x = torch.randn(32, 32, device=self.device_type)
with DebugMode() as dm1, DebugMode.log_tensor_hashes():
x.sin()
with DebugMode() as dm2, DebugMode.log_tensor_hashes():
x.cos()
with DebugMode() as dm3, DebugMode.log_tensor_hashes():
x.sin().cos()
with self.assertRaisesRegex(ValueError, "Operators don't match"):
DebugMode.check_hash_mismatches(dm1.logs, dm2.logs)
with self.assertRaisesRegex(ValueError, "Log lengths don't match"):
DebugMode.check_hash_mismatches(dm1.logs, dm3.logs)
def test_pretty_print_dtensor_make_fx(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
A = torch.randn(8, 32)
B = torch.randn(32, 32)
dA = distribute_tensor(A, mesh, [Shard(0)]).requires_grad_()
dB = distribute_tensor(B, mesh, [Replicate()]).requires_grad_()
def f(dA, dB):
dy = dA @ dB
loss = dy.sum()
loss.backward()
return dA.grad, dB.grad
# We actually need the tracing_mode='fake' here, or to trace under a FakeTensorMode.
# make_fx has some logic to ensure we don't accidentally stash real tensors in the graph
# so we won't stash our DTensors properly if they don't hold Fake inner tensors
gm = make_fx(f, tracing_mode="fake")(dA, dB)
# DCE isn't necessary here, there were just a lot of dead detach() nodes that spammed the graph
gm.graph.eliminate_dead_code()
gm.recompile()
# Colored is nice for actual viewing, not using in this test though
gm_str = gm.print_readable(colored=False, print_output=False)
self.assertTrue('"DTensor(f32[8, 32], S(0))" = torch.ops.aten.mm' in gm_str)
instantiate_parametrized_tests(TestDTensorDebugMode)
if __name__ == "__main__":
run_tests()
| TestDTensorDebugMode |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_allocation_result.py | {
"start": 383,
"end": 5868
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allocation_timestamp': 'datetime',
'devices': 'V1beta1DeviceAllocationResult',
'node_selector': 'V1NodeSelector'
}
attribute_map = {
'allocation_timestamp': 'allocationTimestamp',
'devices': 'devices',
'node_selector': 'nodeSelector'
}
def __init__(self, allocation_timestamp=None, devices=None, node_selector=None, local_vars_configuration=None): # noqa: E501
"""V1beta1AllocationResult - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allocation_timestamp = None
self._devices = None
self._node_selector = None
self.discriminator = None
if allocation_timestamp is not None:
self.allocation_timestamp = allocation_timestamp
if devices is not None:
self.devices = devices
if node_selector is not None:
self.node_selector = node_selector
@property
def allocation_timestamp(self):
"""Gets the allocation_timestamp of this V1beta1AllocationResult. # noqa: E501
AllocationTimestamp stores the time when the resources were allocated. This field is not guaranteed to be set, in which case that time is unknown. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gate. # noqa: E501
:return: The allocation_timestamp of this V1beta1AllocationResult. # noqa: E501
:rtype: datetime
"""
return self._allocation_timestamp
@allocation_timestamp.setter
def allocation_timestamp(self, allocation_timestamp):
"""Sets the allocation_timestamp of this V1beta1AllocationResult.
AllocationTimestamp stores the time when the resources were allocated. This field is not guaranteed to be set, in which case that time is unknown. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gate. # noqa: E501
:param allocation_timestamp: The allocation_timestamp of this V1beta1AllocationResult. # noqa: E501
:type: datetime
"""
self._allocation_timestamp = allocation_timestamp
@property
def devices(self):
"""Gets the devices of this V1beta1AllocationResult. # noqa: E501
:return: The devices of this V1beta1AllocationResult. # noqa: E501
:rtype: V1beta1DeviceAllocationResult
"""
return self._devices
@devices.setter
def devices(self, devices):
"""Sets the devices of this V1beta1AllocationResult.
:param devices: The devices of this V1beta1AllocationResult. # noqa: E501
:type: V1beta1DeviceAllocationResult
"""
self._devices = devices
@property
def node_selector(self):
"""Gets the node_selector of this V1beta1AllocationResult. # noqa: E501
:return: The node_selector of this V1beta1AllocationResult. # noqa: E501
:rtype: V1NodeSelector
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""Sets the node_selector of this V1beta1AllocationResult.
:param node_selector: The node_selector of this V1beta1AllocationResult. # noqa: E501
:type: V1NodeSelector
"""
self._node_selector = node_selector
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1AllocationResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1AllocationResult):
return True
return self.to_dict() != other.to_dict()
| V1beta1AllocationResult |
python | scipy__scipy | scipy/optimize/tests/test_optimize.py | {
"start": 72756,
"end": 75212
} | class ____:
def setup_method(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return 1.0 / p * (x[0]**p + x[1]**p)
def jac(self, x, p=2.0):
return x**(p - 1)
def fj(self, x, p=2.0):
return self.fun(x, p), self.jac(x, p)
def test_l_bfgs_b_bounds(self):
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
fprime=self.jac,
bounds=self.bounds)
assert d['warnflag'] == 0, d['task']
assert_allclose(x, self.solution, atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with fun and jac combined and extra arguments
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
bounds=self.bounds)
assert d['warnflag'] == 0, d['task']
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_l_bfgs_b_bounds(self):
# Minimize with method='L-BFGS-B' with bounds
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=self.bounds)
assert res['success'], res['message']
assert_allclose(res.x, self.solution, atol=1e-6)
@pytest.mark.parametrize('bounds', [
([(10, 1), (1, 10)]),
([(1, 10), (10, 1)]),
([(10, 1), (10, 1)])
])
def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds):
with pytest.raises(ValueError, match='.*bound.*'):
optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=bounds)
def test_minimize_l_bfgs_b_bounds_FD(self):
# test that initial starting value outside bounds doesn't raise
# an error (done with clipping).
# test all different finite differences combos, with and without args
jacs = ['2-point', '3-point', None]
argss = [(2.,), ()]
for jac, args in itertools.product(jacs, argss):
res = optimize.minimize(self.fun, [0, -1], args=args,
method='L-BFGS-B',
jac=jac, bounds=self.bounds,
options={'finite_diff_rel_step': None})
assert res['success'], res['message']
assert_allclose(res.x, self.solution, atol=1e-6)
| TestLBFGSBBounds |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 93577,
"end": 94237
} | class ____(INT8MMTemplateConfigMixin, XPUConfigHeuristic):
"""Int8 MM template heuristic for XPU"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use int8_mm_configs
self.mm_configs = self.int8_mm_configs
# NOTE: overriding exhaustive configs here to be the same as mm_configs
# as we haven't validated exhaustive support here yet
# TODO(coconutruben): remove this once we have validated exhaustive support
# for scaled_mm
self.exhaustive_configs = self.int8_mm_configs
@register_template_heuristic(mm_plus_mm_template.uid, "xpu")
| XPUInt8MMTemplateConfigHeuristic |
python | astropy__astropy | astropy/utils/console.py | {
"start": 32287,
"end": 32446
} | class ____:
def __init__(self):
import msvcrt # noqa: F401
def __call__(self):
import msvcrt
return msvcrt.getch()
| _GetchWindows |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wxagg.py | {
"start": 227,
"end": 1398
} | class ____(FigureCanvasAgg, _FigureCanvasWxBase):
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
FigureCanvasAgg.draw(self)
self.bitmap = self._create_bitmap()
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
# docstring inherited
bitmap = self._create_bitmap()
if bbox is None:
self.bitmap = bitmap
else:
srcDC = wx.MemoryDC(bitmap)
destDC = wx.MemoryDC(self.bitmap)
x = int(bbox.x0)
y = int(self.bitmap.GetHeight() - bbox.y1)
destDC.Blit(x, y, int(bbox.width), int(bbox.height), srcDC, x, y)
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
def _create_bitmap(self):
"""Create a wx.Bitmap from the renderer RGBA buffer"""
rgba = self.get_renderer().buffer_rgba()
h, w, _ = rgba.shape
bitmap = wx.Bitmap.FromBufferRGBA(w, h, rgba)
bitmap.SetScaleFactor(self.GetDPIScaleFactor())
return bitmap
@_BackendWx.export
| FigureCanvasWxAgg |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/analytics_admin.py | {
"start": 5130,
"end": 8274
} | class ____(GoogleCloudBaseOperator):
"""
Creates property.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleAnalyticsAdminCreatePropertyOperator`
:param analytics_property: The property to create. Note: the supplied property must specify its parent.
For more details see: https://developers.google.com/analytics/devguides/config/admin/v1/rest/v1beta/properties#Property
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"gcp_conn_id",
"impersonation_chain",
"analytics_property",
)
operator_extra_links = (GoogleAnalyticsPropertyLink(),)
def __init__(
self,
*,
analytics_property: Property | dict[str, Any],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.analytics_property = analytics_property
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(
self,
context: Context,
) -> Message:
hook = GoogleAnalyticsAdminHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating a Google Analytics property.")
prop = hook.create_property(
analytics_property=self.analytics_property,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("The Google Analytics property %s was created successfully.", prop.name)
GoogleAnalyticsPropertyLink.persist(
context=context,
property_id=prop.name.lstrip("properties/"),
)
return Property.to_dict(prop)
| GoogleAnalyticsAdminCreatePropertyOperator |
python | dagster-io__dagster | python_modules/dagster/dagster/components/core/component_tree.py | {
"start": 21781,
"end": 23222
} | class ____(ComponentTree):
"""ComponentTree variant which terminates autoloading of defs on the keyword
files `definitions.py` and `component.py`. This should only be used for legacy
test and load_defs codepaths.
"""
@property
def decl_load_context(self):
return ComponentDeclLoadContext(
component_path=ComponentPath.from_path(self.defs_module_path),
project_root=self.project_root,
defs_module_path=self.defs_module_path,
defs_module_name=self.defs_module_name,
resolution_context=ResolutionContext.default(),
terminate_autoloading_on_keyword_files=True,
component_tree=self,
)
@staticmethod
def from_module(
defs_module: ModuleType,
project_root: Path,
) -> "ComponentTree":
"""Convenience method for creating a ComponentTree from a module.
Args:
defs_module: The defs module of the project, typically the `defs` directory.
project_root: The root of the project.
terminate_autoloading_on_keyword_files: Whether to terminate autoloading on keyword files such as
`definitions.py` or `component.py`.
Returns:
A ComponentTree.
"""
return LegacyAutoloadingComponentTree(
defs_module=defs_module,
project_root=project_root,
)
| LegacyAutoloadingComponentTree |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-nvidia/tests/test_structured_output.py | {
"start": 726,
"end": 824
} | class ____(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
| Song |
python | davidhalter__jedi | test/completion/pep0484_generic_parameters.py | {
"start": 7676,
"end": 7972
} | class ____(Mapping[str, T]):
pass
custom_partial1_instance: CustomPartialGeneric1[int] = NotImplemented
#? str()
first(custom_partial1_instance)
custom_partial1_unbound_instance: CustomPartialGeneric1 = NotImplemented
#? str()
first(custom_partial1_unbound_instance)
| CustomPartialGeneric1 |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 13074,
"end": 21157
} | class ____:
"""
Tests for `_add_hash`.
"""
def test_enforces_type(self):
"""
The `hash` argument to both attrs and attrib must be None, True, or
False.
"""
exc_args = ("Invalid value for hash. Must be True, False, or None.",)
with pytest.raises(TypeError) as e:
make_class("C", {}, unsafe_hash=1)
assert exc_args == e.value.args
with pytest.raises(TypeError) as e:
make_class("C", {"a": attr.ib(hash=1)})
assert exc_args == e.value.args
def test_enforce_no_cache_hash_without_hash(self):
"""
Ensure exception is thrown if caching the hash code is requested
but attrs is not requested to generate `__hash__`.
"""
exc_args = (
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled.",
)
with pytest.raises(TypeError) as e:
make_class("C", {}, unsafe_hash=False, cache_hash=True)
assert exc_args == e.value.args
# unhashable case
with pytest.raises(TypeError) as e:
make_class(
"C",
{},
unsafe_hash=None,
eq=True,
frozen=False,
cache_hash=True,
)
assert exc_args == e.value.args
def test_enforce_no_cached_hash_without_init(self):
"""
Ensure exception is thrown if caching the hash code is requested
but attrs is not requested to generate `__init__`.
"""
exc_args = (
"Invalid value for cache_hash. To use hash caching,"
" init must be True.",
)
with pytest.raises(TypeError) as e:
make_class("C", {}, init=False, unsafe_hash=True, cache_hash=True)
assert exc_args == e.value.args
@given(booleans(), booleans())
def test_hash_attribute(self, slots, cache_hash):
"""
If `hash` is False on an attribute, ignore that attribute.
"""
C = make_class(
"C",
{"a": attr.ib(hash=False), "b": attr.ib()},
slots=slots,
unsafe_hash=True,
cache_hash=cache_hash,
)
assert hash(C(1, 2)) == hash(C(2, 2))
@given(booleans())
def test_hash_attribute_mirrors_eq(self, eq):
"""
If `hash` is None, the hash generation mirrors `eq`.
"""
C = make_class("C", {"a": attr.ib(eq=eq)}, eq=True, frozen=True)
if eq:
assert C(1) != C(2)
assert hash(C(1)) != hash(C(2))
assert hash(C(1)) == hash(C(1))
else:
assert C(1) == C(2)
assert hash(C(1)) == hash(C(2))
@given(booleans())
def test_hash_mirrors_eq(self, eq):
"""
If `hash` is None, the hash generation mirrors `eq`.
"""
C = make_class("C", {"a": attr.ib()}, eq=eq, frozen=True)
i = C(1)
assert i == i
assert hash(i) == hash(i)
if eq:
assert C(1) == C(1)
assert hash(C(1)) == hash(C(1))
else:
assert C(1) != C(1)
assert hash(C(1)) != hash(C(1))
@pytest.mark.parametrize(
"cls",
[
HashC,
HashCSlots,
HashCCached,
HashCSlotsCached,
HashCFrozenNotSlotsCached,
],
)
def test_hash_works(self, cls):
"""
__hash__ returns different hashes for different values.
"""
a = cls(1, 2)
b = cls(1, 1)
assert hash(a) != hash(b)
# perform the test again to test the pre-cached path through
# __hash__ for the cached-hash versions
assert hash(a) != hash(b)
def test_hash_default(self):
"""
Classes are not hashable by default.
"""
C = make_class("C", {})
with pytest.raises(TypeError) as e:
hash(C())
assert e.value.args[0] in (
"'C' objects are unhashable", # PyPy
"unhashable type: 'C'", # CPython
)
def test_cache_hashing(self):
"""
Ensure that hash computation if cached if and only if requested
"""
class HashCounter:
"""
A class for testing which counts how many times its hash
has been requested
"""
def __init__(self):
self.times_hash_called = 0
def __hash__(self):
self.times_hash_called += 1
return 12345
Uncached = make_class(
"Uncached",
{"hash_counter": attr.ib(factory=HashCounter)},
unsafe_hash=True,
cache_hash=False,
)
Cached = make_class(
"Cached",
{"hash_counter": attr.ib(factory=HashCounter)},
unsafe_hash=True,
cache_hash=True,
)
uncached_instance = Uncached()
cached_instance = Cached()
hash(uncached_instance)
hash(uncached_instance)
hash(cached_instance)
hash(cached_instance)
assert 2 == uncached_instance.hash_counter.times_hash_called
assert 1 == cached_instance.hash_counter.times_hash_called
@pytest.mark.parametrize("cache_hash", [True, False])
def test_copy_hash_cleared(self, cache_hash, frozen, slots):
"""
Test that the default hash is recalculated after a copy operation.
"""
kwargs = {"frozen": frozen, "slots": slots, "cache_hash": cache_hash}
# Give it an explicit hash if we don't have an implicit one
if not frozen:
kwargs["unsafe_hash"] = True
@attr.s(**kwargs)
class C:
x = attr.ib()
a = C(IncrementingHasher())
# Ensure that any hash cache would be calculated before copy
orig_hash = hash(a)
b = copy.deepcopy(a)
if kwargs["cache_hash"]:
# For cache_hash classes, this call is cached
assert orig_hash == hash(a)
assert orig_hash != hash(b)
@pytest.mark.parametrize(
("klass", "cached"),
[
(HashCacheSerializationTestUncached, False),
(HashCacheSerializationTestCached, True),
(HashCacheSerializationTestCachedSlots, True),
],
)
def test_cache_hash_serialization_hash_cleared(self, klass, cached):
"""
Tests that the hash cache is cleared on deserialization to fix
https://github.com/python-attrs/attrs/issues/482 .
This test is intended to guard against a stale hash code surviving
across serialization (which may cause problems when the hash value
is different in different interpreters).
"""
obj = klass(IncrementingHasher())
original_hash = hash(obj)
obj_rt = self._roundtrip_pickle(obj)
if cached:
assert original_hash == hash(obj)
assert original_hash != hash(obj_rt)
def test_copy_two_arg_reduce(self, frozen):
"""
If __getstate__ returns None, the tuple returned by object.__reduce__
won't contain the state dictionary; this test ensures that the custom
__reduce__ generated when cache_hash=True works in that case.
"""
@attr.s(frozen=frozen, cache_hash=True, unsafe_hash=True)
class C:
x = attr.ib()
def __getstate__(self):
return None
# By the nature of this test it doesn't really create an object that's
# in a valid state - it basically does the equivalent of
# `object.__new__(C)`, so it doesn't make much sense to assert anything
# about the result of the copy. This test will just check that it
# doesn't raise an *error*.
copy.deepcopy(C(1))
def _roundtrip_pickle(self, obj):
pickle_str = pickle.dumps(obj)
return pickle.loads(pickle_str)
| TestAddHash |
python | plotly__plotly.py | plotly/graph_objs/scatter/_unselected.py | {
"start": 233,
"end": 3352
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter"
_path_str = "scatter.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatter.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatter.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatter.unselected.Marker`
instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatter.unselected.Textfon
t` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.Unselected`
marker
:class:`plotly.graph_objects.scatter.unselected.Marker`
instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatter.unselected.Textfon
t` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | lazyprogrammer__machine_learning_examples | ann_class2/batch_norm_tf.py | {
"start": 2256,
"end": 5711
} | class ____(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def set_session(self, session):
self.session = session
def fit(self, X, Y, Xtest, Ytest, activation=tf.nn.relu, learning_rate=1e-2, epochs=15, batch_sz=100, print_period=100, show_fig=True):
X = X.astype(np.float32)
Y = Y.astype(np.int32)
# initialize hidden layers
N, D = X.shape
self.layers = []
M1 = D
for M2 in self.hidden_layer_sizes:
h = HiddenLayerBatchNorm(M1, M2, activation)
self.layers.append(h)
M1 = M2
# final layer
K = len(set(Y))
h = HiddenLayer(M1, K, lambda x: x)
self.layers.append(h)
if batch_sz is None:
batch_sz = N
# note! we will need to build the output differently
# for train and test (prediction)
# set up theano functions and variables
tfX = tf.placeholder(tf.float32, shape=(None, D), name='X')
tfY = tf.placeholder(tf.int32, shape=(None,), name='Y')
# for later use
self.tfX = tfX
# for training
logits = self.forward(tfX, is_training=True)
cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=tfY
)
)
# train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# train_op = tf.train.RMSPropOptimizer(learning_rate, decay=0.99, momentum=0.9).minimize(cost)
train_op = tf.train.MomentumOptimizer(learning_rate, momentum=0.9, use_nesterov=True).minimize(cost)
# train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# for testing
test_logits = self.forward(tfX, is_training=False)
self.predict_op = tf.argmax(test_logits, 1)
# accuracy = tf.reduce_mean(1.0*(tfY == tf.argmax(logits, 1)))
# init the variables
self.session.run(tf.global_variables_initializer())
n_batches = N // batch_sz
costs = []
for i in range(epochs):
if n_batches > 1:
X, Y = shuffle(X, Y)
for j in range(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
c, _, lgts = self.session.run([cost, train_op, logits], feed_dict={tfX: Xbatch, tfY: Ybatch})
costs.append(c)
if (j+1) % print_period == 0:
acc = np.mean(Ybatch == np.argmax(lgts, axis=1))
print("epoch:", i, "batch:", j, "n_batches:", n_batches, "cost:", c, "acc: %.2f" % acc)
# print('dbg:', self.session.run(self.layers[0].running_mean).sum())
print("Train acc:", self.score(X, Y), "Test acc:", self.score(Xtest, Ytest))
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X, is_training):
out = X
for h in self.layers[:-1]:
out = h.forward(out, is_training)
out = self.layers[-1].forward(out)
return out
def score(self, X, Y):
P = self.predict(X)
return np.mean(Y == P)
def predict(self, X):
return self.session.run(self.predict_op, feed_dict={self.tfX: X})
def main():
# step 1: get the data and define all the usual variables
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
ann = ANN([500, 300])
session = tf.InteractiveSession()
ann.set_session(session)
ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True)
print("Train accuracy:", ann.score(Xtrain, Ytrain))
print("Test accuracy:", ann.score(Xtest, Ytest))
if __name__ == '__main__':
main()
| ANN |
python | django__django | django/db/migrations/graph.py | {
"start": 1544,
"end": 13085
} | class ____:
"""
Represent the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't
responsible for these; instead, the code to load them in here should
examine the migration files and if the replaced migrations are all either
unapplied or not present, it should ignore the replaced ones, load in just
the replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree
special-cases things within an app - namely, root nodes and leaf nodes
ignore dependencies to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
def add_node(self, key, migration):
assert key not in self.node_map
node = Node(key)
self.node_map[key] = node
self.nodes[key] = migration
def add_dummy_node(self, key, origin, error_message):
node = DummyNode(key, origin, error_message)
self.node_map[key] = node
self.nodes[key] = None
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist. If
`skip_validation=True`, validate_consistency() should be called
afterward.
"""
if child not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" child node %r" % (migration, child)
)
self.add_dummy_node(child, migration, error_message)
if parent not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" parent node %r" % (migration, parent)
)
self.add_dummy_node(parent, migration, error_message)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
if not skip_validation:
self.validate_consistency()
def remove_replaced_nodes(self, replacement, replaced):
"""
Remove each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
# Cast list of replaced keys to set to speed up lookup later.
replaced = set(replaced)
try:
replacement_node = self.node_map[replacement]
except KeyError as err:
raise NodeNotFoundError(
"Unable to find replacement node %r. It was either never added"
" to the migration graph, or has been removed." % (replacement,),
replacement,
) from err
for replaced_key in replaced:
self.nodes.pop(replaced_key, None)
replaced_node = self.node_map.pop(replaced_key, None)
if replaced_node:
for child in replaced_node.children:
child.parents.remove(replaced_node)
# We don't want to create dependencies between the replaced
# node and the replacement node as this would lead to
# self-referencing on the replacement node at a later
# iteration.
if child.key not in replaced:
replacement_node.add_child(child)
child.add_parent(replacement_node)
for parent in replaced_node.parents:
parent.children.remove(replaced_node)
# Again, to avoid self-referencing.
if parent.key not in replaced:
replacement_node.add_parent(parent)
parent.add_child(replacement_node)
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Remove the
replacement node `replacement` and remap its child nodes to `replaced`
- the list of nodes it would have replaced. Don't remap its parent
nodes as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
replacement_node = self.node_map.pop(replacement)
except KeyError as err:
raise NodeNotFoundError(
"Unable to remove replacement node %r. It was either never added"
" to the migration graph, or has been removed already."
% (replacement,),
replacement,
) from err
replaced_nodes = set()
replaced_nodes_parents = set()
for key in replaced:
replaced_node = self.node_map.get(key)
if replaced_node:
replaced_nodes.add(replaced_node)
replaced_nodes_parents |= replaced_node.parents
# We're only interested in the latest replaced node, so filter out
# replaced nodes that are parents of other replaced nodes.
replaced_nodes -= replaced_nodes_parents
for child in replacement_node.children:
child.parents.remove(replacement_node)
for replaced_node in replaced_nodes:
replaced_node.add_child(child)
child.add_parent(replaced_node)
for parent in replacement_node.parents:
parent.children.remove(replacement_node)
# NOTE: There is no need to remap parent dependencies as we can
# assume the replaced nodes already have the correct ancestry.
def validate_consistency(self):
"""Ensure there are no dummy nodes remaining in the graph."""
[n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
def forwards_plan(self, target):
"""
Given a node, return a list of which previous nodes (dependencies) must
be applied, ending with the node itself. This is the list you would
follow if applying the migrations to a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target])
def backwards_plan(self, target):
"""
Given a node, return a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself. This is the list you
would follow if removing the migrations from a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target], forwards=False)
def iterative_dfs(self, start, forwards=True):
"""Iterative depth-first search for finding dependencies."""
visited = []
visited_set = set()
stack = [(start, False)]
while stack:
node, processed = stack.pop()
if node in visited_set:
pass
elif processed:
visited_set.add(node)
visited.append(node.key)
else:
stack.append((node, True))
stack += [
(n, False)
for n in sorted(node.parents if forwards else node.children)
]
return visited
def root_nodes(self, app=None):
"""
Return all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].parents) and (
not app or app == node[0]
):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Return all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].children) and (
not app or app == node[0]
):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self):
# Algo from GvR:
# https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for child in self.node_map[top].children:
# Use child.key instead of child to speed up the frequent
# hashing.
node = child.key
if node in stack:
cycle = stack[stack.index(node) :]
raise CircularDependencyError(
", ".join("%s.%s" % n for n in cycle)
)
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return "Graph: %s nodes, %s edges" % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return "<%s: nodes=%s, edges=%s>" % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(
len(node.parents) for node in self.node_map.values()
)
def _generate_plan(self, nodes, at_end):
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan and (at_end or migration not in nodes):
plan.append(migration)
return plan
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, return a complete ProjectState for it.
If at_end is False, return the state before the migration has run.
If nodes is not provided, return the overall most current project
state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if not nodes:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = self._generate_plan(nodes, at_end)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
def __contains__(self, node):
return node in self.nodes
| MigrationGraph |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/output/win32.py | {
"start": 19181,
"end": 22639
} | class ____:
"""
Inspired by pygments/formatters/terminal256.py
"""
def __init__(self) -> None:
self._win32_colors = self._build_color_table()
# Cache (map color string to foreground and background code).
self.best_match: dict[str, tuple[int, int]] = {}
@staticmethod
def _build_color_table() -> list[tuple[int, int, int, int, int]]:
"""
Build an RGB-to-256 color conversion table
"""
FG = FOREGROUND_COLOR
BG = BACKGROUND_COLOR
return [
(0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),
(0x00, 0x00, 0xAA, FG.BLUE, BG.BLUE),
(0x00, 0xAA, 0x00, FG.GREEN, BG.GREEN),
(0x00, 0xAA, 0xAA, FG.CYAN, BG.CYAN),
(0xAA, 0x00, 0x00, FG.RED, BG.RED),
(0xAA, 0x00, 0xAA, FG.MAGENTA, BG.MAGENTA),
(0xAA, 0xAA, 0x00, FG.YELLOW, BG.YELLOW),
(0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),
(0x44, 0x44, 0xFF, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),
(0x44, 0xFF, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),
(0x44, 0xFF, 0xFF, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),
(0xFF, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),
(0xFF, 0x44, 0xFF, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),
(0xFF, 0xFF, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),
(0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),
(0xFF, 0xFF, 0xFF, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),
]
def _closest_color(self, r: int, g: int, b: int) -> tuple[int, int]:
distance = 257 * 257 * 3 # "infinity" (>distance from #000000 to #ffffff)
fg_match = 0
bg_match = 0
for r_, g_, b_, fg_, bg_ in self._win32_colors:
rd = r - r_
gd = g - g_
bd = b - b_
d = rd * rd + gd * gd + bd * bd
if d < distance:
fg_match = fg_
bg_match = bg_
distance = d
return fg_match, bg_match
def _color_indexes(self, color: str) -> tuple[int, int]:
indexes = self.best_match.get(color, None)
if indexes is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xFF
g = (rgb >> 8) & 0xFF
b = rgb & 0xFF
indexes = self._closest_color(r, g, b)
self.best_match[color] = indexes
return indexes
def lookup_fg_color(self, fg_color: str) -> int:
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
"""
# Foreground.
if fg_color in FG_ANSI_COLORS:
return FG_ANSI_COLORS[fg_color]
else:
return self._color_indexes(fg_color)[0]
def lookup_bg_color(self, bg_color: str) -> int:
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param bg_color: Background as text. E.g. 'ffffff' or 'red'
"""
# Background.
if bg_color in BG_ANSI_COLORS:
return BG_ANSI_COLORS[bg_color]
else:
return self._color_indexes(bg_color)[1]
| ColorLookupTable |
python | sympy__sympy | sympy/stats/symbolic_probability.py | {
"start": 1198,
"end": 4705
} | class ____(Expr):
"""
Symbolic expression for the probability.
Examples
========
>>> from sympy.stats import Probability, Normal
>>> from sympy import Integral
>>> X = Normal("X", 0, 1)
>>> prob = Probability(X > 1)
>>> prob
Probability(X > 1)
Integral representation:
>>> prob.rewrite(Integral)
Integral(sqrt(2)*exp(-_z**2/2)/(2*sqrt(pi)), (_z, 1, oo))
Evaluation of the integral:
>>> prob.evaluate_integral()
sqrt(2)*(-sqrt(2)*sqrt(pi)*erf(sqrt(2)/2) + sqrt(2)*sqrt(pi))/(4*sqrt(pi))
"""
is_commutative = True
def __new__(cls, prob, condition=None, **kwargs):
prob = _sympify(prob)
if condition is None:
obj = Expr.__new__(cls, prob)
else:
condition = _sympify(condition)
obj = Expr.__new__(cls, prob, condition)
obj._condition = condition
return obj
def doit(self, **hints):
condition = self.args[0]
given_condition = self._condition
numsamples = hints.get('numsamples', False)
evaluate = hints.get('evaluate', True)
if isinstance(condition, Not):
return S.One - self.func(condition.args[0], given_condition,
evaluate=evaluate).doit(**hints)
if condition.has(RandomIndexedSymbol):
return pspace(condition).probability(condition, given_condition,
evaluate=evaluate)
if isinstance(given_condition, RandomSymbol):
condrv = random_symbols(condition)
if len(condrv) == 1 and condrv[0] == given_condition:
from sympy.stats.frv_types import BernoulliDistribution
return BernoulliDistribution(self.func(condition).doit(**hints), 0, 1)
if any(dependent(rv, given_condition) for rv in condrv):
return Probability(condition, given_condition)
else:
return Probability(condition).doit()
if given_condition is not None and \
not isinstance(given_condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (given_condition))
if given_condition == False or condition is S.false:
return S.Zero
if not isinstance(condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (condition))
if condition is S.true:
return S.One
if numsamples:
return sampling_P(condition, given_condition, numsamples=numsamples)
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return Probability(given(condition, given_condition)).doit()
# Otherwise pass work off to the ProbabilitySpace
if pspace(condition) == PSpace():
return Probability(condition, given_condition)
result = pspace(condition).probability(condition)
if hasattr(result, 'doit') and evaluate:
return result.doit()
else:
return result
def _eval_rewrite_as_Integral(self, arg, condition=None, **kwargs):
return self.func(arg, condition=condition).doit(evaluate=False)
_eval_rewrite_as_Sum = _eval_rewrite_as_Integral
def evaluate_integral(self):
return self.rewrite(Integral).doit()
| Probability |
python | ray-project__ray | python/ray/train/lightning/_lightning_utils.py | {
"start": 2382,
"end": 5073
} | class ____(FSDPStrategy): # noqa: F821
"""Subclass of FSDPStrategy to ensure compatibility with Ray orchestration.
For a full list of initialization arguments, please refer to:
https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.strategies.FSDPStrategy.html
.. note::
It is recommended to upgrade `lightning>=2.1` or above when using FSDP
with Lightning, since Lightning starts to natively support `state_dict_type`,
`sharding_strategy`, `auto_wrap_policy` and other FSDP configurations from 2.1.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
record_extra_usage_tag(TagKey.TRAIN_LIGHTNING_RAYFSDPSTRATEGY, "1")
@property
def root_device(self) -> torch.device:
return ray.train.torch.get_device()
@property
def distributed_sampler_kwargs(self) -> Dict[str, Any]:
return dict(
num_replicas=self.world_size,
rank=self.global_rank,
)
def lightning_module_state_dict(self) -> Dict[str, Any]:
"""Gathers the full state dict to rank 0 on CPU.
FSDP checkpointing is broken in Lightning 2.0.x. This subclass patches the
behavior to perform a full state dict checkpointing, gathering the checkpoint
shards on rank 0 CPU. Upgrade to `lightning>=2.1` to do sharded state dict
checkpointing.
See the note in the class docstring for more details.
"""
assert self.model is not None, "Failed to get the state dict for a None model!"
if (
_TORCH_FSDP_AVAILABLE
and _LIGHTNING_GREATER_EQUAL_2_0
and _LIGHTNING_LESS_THAN_2_1
):
with FullyShardedDataParallel.state_dict_type(
module=self.model,
state_dict_type=StateDictType.FULL_STATE_DICT,
state_dict_config=FullStateDictConfig(
offload_to_cpu=True, rank0_only=True
),
):
state_dict = self.model.state_dict()
ckpt_state_dict = {}
prefix_len = len("_forward_module.")
for k, v in state_dict.items():
if k.startswith("_forward_module."):
non_prefixed_key = k[prefix_len:]
ckpt_state_dict[non_prefixed_key] = v
else:
ckpt_state_dict[k] = v
return ckpt_state_dict
else:
# Otherwise Lightning uses Fairscale FSDP, no need to unshard by ourself.
return super().lightning_module_state_dict()
@PublicAPI(stability="beta")
| RayFSDPStrategy |
python | getsentry__sentry | src/sentry/integrations/slack/webhooks/options_load.py | {
"start": 882,
"end": 4095
} | class ____(Endpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
slack_request_class = SlackOptionsLoadRequest
def is_substring(self, string, substring):
# in case either have special characters, we want to preserve the strings
# as is, so we escape both before applying re.match
substring = re.escape(substring)
return bool(re.match(substring, string, re.I))
def get_filtered_option_groups(self, group: Group, substring: str) -> list[OptionGroup]:
all_teams = group.project.teams.all()
filtered_teams = list(
filter(
lambda team: any(
[
self.is_substring(team.name, substring),
self.is_substring(team.slug, substring),
]
),
all_teams,
)
)
all_members = group.project.get_members_as_rpc_users()
filtered_members = list(
filter(
lambda member: any(
[
self.is_substring(member.display_name, substring),
self.is_substring(member.name, substring),
self.is_substring(member.email, substring),
self.is_substring(member.username, substring),
]
),
all_members,
)
)
option_groups: list[OptionGroup] = []
if filtered_teams:
team_options_group: OptionGroup = {
"label": {"type": "plain_text", "text": "Teams"},
"options": format_actor_options_slack(filtered_teams),
}
option_groups.append(team_options_group)
if filtered_members:
member_options_group: OptionGroup = {
"label": {"type": "plain_text", "text": "People"},
"options": format_actor_options_slack(filtered_members),
}
option_groups.append(member_options_group)
return option_groups
# XXX(isabella): atm this endpoint is used only for the assignment dropdown on issue alerts
def post(self, request: Request) -> Response:
try:
slack_request = self.slack_request_class(request)
slack_request.validate()
except SlackRequestError as e:
return self.respond(status=e.status)
group = (
Group.objects.select_related("project__organization")
.filter(id=slack_request.group_id)
.first()
)
if not group:
_logger.error(
"slack.options_load.request-error",
extra={
"group_id": slack_request.group_id,
"request_data": orjson.dumps(slack_request.data).decode(),
},
)
return self.respond(status=status.HTTP_400_BAD_REQUEST)
payload = {"option_groups": self.get_filtered_option_groups(group, slack_request.substring)}
return self.respond(payload)
| SlackOptionsLoadEndpoint |
python | tensorflow__tensorflow | tensorflow/python/keras/testing_utils.py | {
"start": 18712,
"end": 19771
} | class ____(models.Model):
"""A Keras subclass model."""
def __init__(self, model_layers, *args, **kwargs):
"""Instantiate a model.
Args:
model_layers: a list of layers to be added to the model.
*args: Model's args
**kwargs: Model's keyword args, at most one of input_tensor -> the input
tensor required for ragged/sparse input.
"""
inputs = kwargs.pop('input_tensor', None)
super(_SubclassModel, self).__init__(*args, **kwargs)
# Note that clone and build doesn't support lists of layers in subclassed
# models. Adding each layer directly here.
for i, layer in enumerate(model_layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(model_layers)
if inputs is not None:
self._set_inputs(inputs)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
| _SubclassModel |
python | doocs__leetcode | solution/1700-1799/1744.Can You Eat Your Favorite Candy on Your Favorite Day/Solution.py | {
"start": 0,
"end": 331
} | class ____:
def canEat(self, candiesCount: List[int], queries: List[List[int]]) -> List[bool]:
s = list(accumulate(candiesCount, initial=0))
ans = []
for t, day, mx in queries:
least, most = day, (day + 1) * mx
ans.append(least < s[t + 1] and most > s[t])
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | {
"start": 14049,
"end": 19599
} | class ____(nn.Module):
"""
Multi-Head attention layer with relative position encoding. Details can be found in
https://github.com/espnet/espnet/pull/2816. Paper: https://huggingface.co/papers/1901.02860.
"""
def __init__(self, config: FastSpeech2ConformerConfig, module_config):
"""Construct an FastSpeech2ConformerAttention object."""
super().__init__()
# We assume d_v always equals dim_key
self.num_heads = module_config["num_attention_heads"]
self.hidden_size = config.hidden_size
self.dim_key = self.hidden_size // self.num_heads
self.head_dim = self.hidden_size // self.num_heads
self.linear_q = nn.Linear(self.hidden_size, self.hidden_size)
self.linear_k = nn.Linear(self.hidden_size, self.hidden_size)
self.linear_v = nn.Linear(self.hidden_size, self.hidden_size)
self.linear_out = nn.Linear(self.hidden_size, self.hidden_size)
self.dropout = nn.Dropout(p=module_config["attention_dropout_rate"])
# linear transformation for positional encoding
self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://huggingface.co/papers/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
def shift_relative_position_tensor(self, pos_tensor):
"""
Args:
pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
"""
zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype)
pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1)
pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))
# only keep the positions from 0 to time2
pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, : pos_tensor.size(-1) // 2 + 1]
return pos_tensor
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
pos_emb: Optional[torch.Tensor] = None,
output_attentions: Optional[torch.Tensor] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states
attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor.
pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
Returns:
`torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`.
"""
bsz, q_len, _ = hidden_states.size()
query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
bsz_pos = pos_emb.size(0)
pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim)
# (batch_size, head, time1, dim_key)
query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2)
# (batch_size, head, time1, dim_key)
query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://huggingface.co/papers/1901.02860 Section 3.3
# (batch_size, head, time1, time2)
matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1))
# compute matrix b and matrix d
# (batch_size, head, time1, 2*time1-1)
matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1))
matrix_bd = self.shift_relative_position_tensor(matrix_bd)
# (batch_size, head, time1, time2)
scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key)
# Forward attention
if attention_mask is not None:
expected_size = (bsz, 1, q_len)
if attention_mask.size() != expected_size:
raise ValueError(f"Attention mask should be of size {expected_size}, but is {attention_mask.size()}")
attention_mask = attention_mask.unsqueeze(1).eq(0)
min_value = float(torch.finfo(scores.dtype).min)
scores = scores.masked_fill(attention_mask, min_value)
attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0)
else:
attn_weights = torch.softmax(scores, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2))
attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1)
attn_output = self.linear_out(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
| FastSpeech2ConformerAttention |
python | encode__django-rest-framework | tests/test_response.py | {
"start": 3718,
"end": 7761
} | class ____(TestCase):
"""
End-to-end testing of renderers using an ResponseMixin on a generic view.
"""
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEqual(resp.status_code, DUMMYSTATUS)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, b'')
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
@override_settings(ROOT_URLCONF='tests.test_response')
| RendererIntegrationTests |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_raise.py | {
"start": 5404,
"end": 7735
} | class ____(__TestCase):
def testCauseSyntax(self):
try:
try:
try:
raise TypeError
except Exception:
raise ValueError from None
except ValueError as exc:
self.assertIsNone(exc.__cause__)
self.assertTrue(exc.__suppress_context__)
exc.__suppress_context__ = False
raise exc
except ValueError as exc:
e = exc
self.assertIsNone(e.__cause__)
self.assertFalse(e.__suppress_context__)
self.assertIsInstance(e.__context__, TypeError)
def test_invalid_cause(self):
try:
raise IndexError from 5
except TypeError as e:
self.assertIn("exception cause", str(e))
else:
self.fail("No exception raised")
def test_class_cause(self):
try:
raise IndexError from KeyError
except IndexError as e:
self.assertIsInstance(e.__cause__, KeyError)
else:
self.fail("No exception raised")
def test_class_cause_nonexception_result(self):
with torch._dynamo.error_on_graph_break(False):
class ConstructsNone(BaseException):
@classmethod
def __new__(*args, **kwargs):
return None
try:
raise IndexError from ConstructsNone
except TypeError as e:
self.assertIn("should have returned an instance of BaseException", str(e))
except IndexError:
self.fail("Wrong kind of exception raised")
else:
self.fail("No exception raised")
def test_instance_cause(self):
cause = KeyError()
try:
raise IndexError from cause
except IndexError as e:
self.assertIs(e.__cause__, cause)
else:
self.fail("No exception raised")
def test_erroneous_cause(self):
with torch._dynamo.error_on_graph_break(False):
class MyException(Exception):
def __init__(self):
raise RuntimeError()
try:
raise IndexError from MyException
except RuntimeError:
pass
else:
self.fail("No exception raised")
| TestCause |
python | getsentry__sentry | tests/sentry/integrations/utils/test_issue_summary_for_alerts.py | {
"start": 350,
"end": 9808
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.organization = self.create_organization()
self.project = self.create_project(organization=self.organization)
# Create an error group with the proper type
self.group = self.create_group(project=self.project, type=ErrorGroupType.type_id)
def test_fetch_issue_summary_returns_none_for_non_error_groups(self) -> None:
"""Test that fetch_issue_summary returns None for non-error issue categories"""
# Create a performance group for this test
performance_group = self.create_group(
project=self.project, type=PerformanceNPlusOneGroupType.type_id
)
result = fetch_issue_summary(performance_group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.integrations.utils.issue_summary_for_alerts.is_seer_scanner_rate_limited")
@patch("sentry.quotas.backend.has_available_reserved_budget")
def test_fetch_issue_summary_with_hide_ai_features_enabled(
self, mock_has_budget, mock_rate_limited, mock_seer_ack
):
"""Test that fetch_issue_summary returns None when hideAiFeatures is True"""
# Set up all the required conditions to pass except hideAiFeatures
self.project.update_option("sentry:seer_scanner_automation", True)
self.organization.update_option("sentry:hide_ai_features", True)
mock_seer_ack.return_value = True
mock_rate_limited.return_value = False
mock_has_budget.return_value = True
result = fetch_issue_summary(self.group)
assert result is None
# Verify that budget check wasn't called since we returned early
mock_has_budget.assert_not_called()
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_issue_summary")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.integrations.utils.issue_summary_for_alerts.is_seer_scanner_rate_limited")
@patch("sentry.quotas.backend.has_available_reserved_budget")
def test_fetch_issue_summary_with_hide_ai_features_disabled(
self, mock_has_budget, mock_rate_limited, mock_seer_ack, mock_get_issue_summary
):
"""Test that fetch_issue_summary proceeds normally when hideAiFeatures is False"""
# Set up all the required conditions to pass
self.project.update_option("sentry:seer_scanner_automation", True)
self.organization.update_option("sentry:hide_ai_features", False)
self.organization.update_option("sentry:enable_seer_enhanced_alerts", True)
mock_seer_ack.return_value = True
mock_rate_limited.return_value = False
mock_has_budget.return_value = True
# Mock successful summary response
mock_summary = {
"headline": "Test AI Summary",
"whatsWrong": "Something went wrong",
"possibleCause": "Test cause",
}
mock_get_issue_summary.return_value = (mock_summary, 200)
result = fetch_issue_summary(self.group)
assert result == mock_summary
mock_get_issue_summary.assert_called_once()
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
def test_fetch_issue_summary_without_seer_scanner_automation(
self, mock_seer_ack: MagicMock
) -> None:
"""Test that fetch_issue_summary returns None when seer_scanner_automation is disabled"""
self.project.update_option("sentry:seer_scanner_automation", False)
mock_seer_ack.return_value = True
result = fetch_issue_summary(self.group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
def test_fetch_issue_summary_without_org_acknowledgement(
self, mock_seer_ack: MagicMock
) -> None:
"""Test that fetch_issue_summary returns None when org hasn't acknowledged Seer"""
self.project.update_option("sentry:seer_scanner_automation", True)
mock_seer_ack.return_value = False
result = fetch_issue_summary(self.group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.quotas.backend.has_available_reserved_budget")
def test_fetch_issue_summary_without_enable_seer_enhanced_alerts(
self, mock_has_budget: MagicMock, mock_seer_ack: MagicMock
) -> None:
"""Test that fetch_issue_summary returns None when enable_seer_enhanced_alerts is disabled"""
# Set up all the required conditions to pass except enable_seer_enhanced_alerts
self.project.update_option("sentry:seer_scanner_automation", True)
self.organization.update_option("sentry:hide_ai_features", False)
self.organization.update_option("sentry:enable_seer_enhanced_alerts", False)
mock_seer_ack.return_value = True
result = fetch_issue_summary(self.group)
assert result is None
# Verify that budget check wasn't called since we returned early
mock_has_budget.assert_not_called()
def test_fetch_issue_summary_without_gen_ai_features(self) -> None:
"""Test that fetch_issue_summary returns None without gen-ai-features flag"""
self.project.update_option("sentry:seer_scanner_automation", True)
# No @with_feature decorator, so gen-ai-features is disabled
result = fetch_issue_summary(self.group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.integrations.utils.issue_summary_for_alerts.is_seer_scanner_rate_limited")
def test_fetch_issue_summary_rate_limited(
self, mock_rate_limited: MagicMock, mock_seer_ack: MagicMock
) -> None:
"""Test that fetch_issue_summary returns None when rate limited"""
self.project.update_option("sentry:seer_scanner_automation", True)
mock_seer_ack.return_value = True
mock_rate_limited.return_value = True
result = fetch_issue_summary(self.group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.integrations.utils.issue_summary_for_alerts.is_seer_scanner_rate_limited")
@patch("sentry.quotas.backend.has_available_reserved_budget")
def test_fetch_issue_summary_no_budget(
self, mock_has_budget: MagicMock, mock_rate_limited: MagicMock, mock_seer_ack: MagicMock
) -> None:
"""Test that fetch_issue_summary returns None when no budget available"""
self.project.update_option("sentry:seer_scanner_automation", True)
mock_seer_ack.return_value = True
mock_rate_limited.return_value = False
mock_has_budget.return_value = False
result = fetch_issue_summary(self.group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_issue_summary")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.integrations.utils.issue_summary_for_alerts.is_seer_scanner_rate_limited")
@patch("sentry.quotas.backend.has_available_reserved_budget")
def test_fetch_issue_summary_timeout_error(
self, mock_has_budget, mock_rate_limited, mock_seer_ack, mock_get_issue_summary
):
"""Test that fetch_issue_summary returns None when timeout occurs"""
self.project.update_option("sentry:seer_scanner_automation", True)
mock_seer_ack.return_value = True
mock_rate_limited.return_value = False
mock_has_budget.return_value = True
# Mock timeout exception
import concurrent.futures
mock_get_issue_summary.side_effect = concurrent.futures.TimeoutError()
result = fetch_issue_summary(self.group)
assert result is None
@with_feature("organizations:gen-ai-features")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_issue_summary")
@patch("sentry.integrations.utils.issue_summary_for_alerts.get_seer_org_acknowledgement")
@patch("sentry.integrations.utils.issue_summary_for_alerts.is_seer_scanner_rate_limited")
@patch("sentry.quotas.backend.has_available_reserved_budget")
def test_fetch_issue_summary_api_error(
self, mock_has_budget, mock_rate_limited, mock_seer_ack, mock_get_issue_summary
):
"""Test that fetch_issue_summary returns None when API returns error status"""
self.project.update_option("sentry:seer_scanner_automation", True)
mock_seer_ack.return_value = True
mock_rate_limited.return_value = False
mock_has_budget.return_value = True
# Mock error response
mock_get_issue_summary.return_value = (None, 500)
result = fetch_issue_summary(self.group)
assert result is None
| FetchIssueSummaryTest |
python | numba__numba | numba/cuda/dispatcher.py | {
"start": 18402,
"end": 19839
} | class ____:
def __init__(self, dispatcher, griddim, blockdim, stream, sharedmem):
self.dispatcher = dispatcher
self.griddim = griddim
self.blockdim = blockdim
self.stream = stream
self.sharedmem = sharedmem
if config.CUDA_LOW_OCCUPANCY_WARNINGS:
# Warn when the grid has fewer than 128 blocks. This number is
# chosen somewhat heuristically - ideally the minimum is 2 times
# the number of SMs, but the number of SMs varies between devices -
# some very small GPUs might only have 4 SMs, but an H100-SXM5 has
# 132. In general kernels should be launched with large grids
# (hundreds or thousands of blocks), so warning when fewer than 128
# blocks are used will likely catch most beginner errors, where the
# grid tends to be very small (single-digit or low tens of blocks).
min_grid_size = 128
grid_size = griddim[0] * griddim[1] * griddim[2]
if grid_size < min_grid_size:
msg = (f"Grid size {grid_size} will likely result in GPU "
"under-utilization due to low occupancy.")
warn(NumbaPerformanceWarning(msg))
def __call__(self, *args):
return self.dispatcher.call(args, self.griddim, self.blockdim,
self.stream, self.sharedmem)
| _LaunchConfiguration |
python | spack__spack | var/spack/test_repos/spack_repo/tutorial/packages/netlib_lapack/package.py | {
"start": 220,
"end": 7429
} | class ____(CMakePackage):
"""LAPACK version 3.X is a comprehensive FORTRAN library that does
linear algebra operations including matrix inversions, least squared
solutions to linear sets of equations, eigenvector analysis, singular
value decomposition, etc. It is a very comprehensive and reputable
package that has found extensive use in the scientific community.
"""
homepage = "http://www.netlib.org/lapack/"
url = "http://www.netlib.org/lapack/lapack-3.5.0.tgz"
version(
"3.8.0",
"96591affdbf58c450d45c1daa540dbd2",
url="http://www.netlib.org/lapack/lapack-3.8.0.tar.gz",
)
version("3.7.1", md5="dcdeeed73de152c4643ccc5b1aeb453c")
version("3.7.0", md5="697bb8d67c7d336a0f339cc9dd0fa72f")
version("3.6.1", md5="421b2cb72e15f237e144428f9c460ee0")
version("3.6.0", md5="f2f6c67134e851fe189bb3ca1fbb5101")
version("3.5.0", md5="b1d3e3e425b2e44a06760ff173104bdf")
version("3.4.2", md5="61bf1a8a4469d4bdb7604f5897179478")
version("3.4.1", md5="44c3869c38c8335c2b9c2a8bb276eb55")
version("3.4.0", md5="02d5706ec03ba885fc246e5fa10d8c70")
version("3.3.1", md5="d0d533ec9a5b74933c2a1e84eedc58b4")
variant("shared", default=True, description="Build shared library version")
variant("external-blas", default=False, description="Build lapack with an external blas")
variant("lapacke", default=True, description="Activates the build of the LAPACKE C interface")
variant("xblas", default=False, description="Builds extended precision routines using XBLAS")
patch("ibm-xl.patch", when="@3.7: %xl")
patch("ibm-xl.patch", when="@3.7: %xl_r")
# https://github.com/Reference-LAPACK/lapack/issues/228
# TODO: update 'when' once the version of lapack
# containing the fix is released and added to Spack.
patch("undefined_declarations.patch", when="@3.8.0:")
# https://github.com/Reference-LAPACK/lapack/pull/268
# TODO: update 'when' once the version of lapack
# containing the fix is released and added to Spack.
patch("testing.patch", when="@3.7.0:")
# virtual dependency
provides("blas", when="~external-blas")
provides("lapack")
depends_on("blas", when="+external-blas")
depends_on("netlib-xblas+fortran+plain_blas", when="+xblas")
depends_on("python@2.7:", type="test")
# We need to run every phase twice in order to get static and shared
# versions of the libraries. When ~shared, we run the default
# implementations of the CMakePackage's phases and get only one building
# directory 'spack-build-static' with -DBUILD_SHARED_LIBS:BOOL=OFF (see
# implementations of self.build_directory and self.cmake_args() below).
# When +shared, we run the overridden methods for the phases, each
# running the default implementation twice with different values for
# self._building_shared. As a result, we get two building directories:
# 'spack-build-static' with -DBUILD_SHARED_LIBS:BOOL=OFF and
# 'spack-build-shared' with -DBUILD_SHARED_LIBS:BOOL=ON.
_building_shared = False
def patch(self):
# Fix cblas CMakeLists.txt -- has wrong case for subdirectory name.
if self.spec.satisfies("@3.6.0:"):
filter_file(
"${CMAKE_CURRENT_SOURCE_DIR}/CMAKE/",
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/",
"CBLAS/CMakeLists.txt",
string=True,
)
@property
def blas_libs(self):
shared = True if "+shared" in self.spec else False
query_parameters = self.spec.last_query.extra_parameters
query2libraries = {
tuple(): ["libblas"],
("c", "fortran"): ["libcblas", "libblas"],
("c",): ["libcblas"],
("fortran",): ["libblas"],
}
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(libraries, root=self.prefix, shared=shared, recursive=True)
# TUTORIAL: add a proper `lapack_lib` property, along the lines
# of the `blas_lib` property above. The library that provides
# the lapack API is called `liblapack`.
@property
def headers(self):
include_dir = self.spec.prefix.include
cblas_h = join_path(include_dir, "cblas.h")
lapacke_h = join_path(include_dir, "lapacke.h")
return HeaderList([cblas_h, lapacke_h])
@property
def build_directory(self):
return join_path(
self.stage.source_path,
"spack-build-shared" if self._building_shared else "spack-build-static",
)
def cmake_args(self):
args = ["-DBUILD_SHARED_LIBS:BOOL=" + ("ON" if self._building_shared else "OFF")]
if self.spec.satisfies("+lapacke"):
args.extend(["-DLAPACKE:BOOL=ON", "-DLAPACKE_WITH_TMG:BOOL=ON"])
else:
args.extend(["-DLAPACKE:BOOL=OFF", "-DLAPACKE_WITH_TMG:BOOL=OFF"])
if self.spec.satisfies("@3.6.0:"):
args.append("-DCBLAS=ON") # always build CBLAS
if self.spec.satisfies("%intel"):
# Intel compiler finds serious syntax issues when trying to
# build CBLAS and LapackE
args.extend(["-DCBLAS=OFF", "-DLAPACKE:BOOL=OFF"])
if self.spec.satisfies("%xl") or self.spec.satisfies("%xl_r"):
# use F77 compiler if IBM XL
args.extend(
[
"-DCMAKE_Fortran_COMPILER=" + self.compiler.f77,
"-DCMAKE_Fortran_FLAGS="
+ (" ".join(self.spec.compiler_flags["fflags"]))
+ " -O3 -qnohot",
]
)
# deprecated routines are commonly needed by, for example, suitesparse
# Note that OpenBLAS spack is built with deprecated routines
args.append("-DBUILD_DEPRECATED:BOOL=ON")
if self.spec.satisfies("+external-blas"):
args.extend(
[
"-DUSE_OPTIMIZED_BLAS:BOOL=ON",
"-DBLAS_LIBRARIES:PATH=" + self.spec["blas"].libs.joined(";"),
]
)
if self.spec.satisfies("+xblas"):
args.extend(
[
"-DXBLAS_INCLUDE_DIR=" + self.spec["netlib-xblas"].prefix.include,
"-DXBLAS_LIBRARY=" + self.spec["netlib-xblas"].libs.joined(";"),
]
)
args.append("-DBUILD_TESTING:BOOL=" + ("ON" if self.run_tests else "OFF"))
return args
# Build, install, and check both static and shared versions of the
# libraries when +shared
@when("+shared")
def cmake(self, spec, prefix):
for self._building_shared in (False, True):
super().cmake(spec, prefix)
@when("+shared")
def build(self, spec, prefix):
for self._building_shared in (False, True):
super().build(spec, prefix)
@when("+shared")
def install(self, spec, prefix):
for self._building_shared in (False, True):
super().install(spec, prefix)
@when("+shared")
def check(self):
for self._building_shared in (False, True):
super().check()
| NetlibLapack |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/object_identity.py | {
"start": 2594,
"end": 3135
} | class ____(_ObjectIdentityWrapper):
"""Reference that refers an object.
```python
x = [1]
y = [1]
x_ref1 = Reference(x)
x_ref2 = Reference(x)
y_ref2 = Reference(y)
print(x_ref1 == x_ref2)
==> True
print(x_ref1 == y)
==> False
```
"""
__slots__ = ()
# Disabling super class' unwrapped field.
unwrapped = property()
def deref(self):
"""Returns the referenced object.
```python
x_ref = Reference(x)
print(x is x_ref.deref())
==> True
```
"""
return self._wrapped
| Reference |
python | gevent__gevent | src/gevent/tests/lock_tests.py | {
"start": 482,
"end": 1507
} | class ____(object):
"""
A bunch of threads.
"""
def __init__(self, f, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.n = n
self.started = []
self.finished = []
self._can_exit = not wait_before_exit
def task():
tid = get_ident()
self.started.append(tid)
try:
f()
finally:
self.finished.append(tid)
while not self._can_exit:
_wait()
for _ in range(n):
start_new_thread(task, ())
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit = True
| Bunch |
python | pytorch__pytorch | torch/nn/modules/adaptive.py | {
"start": 393,
"end": 12606
} | class ____(Module):
(
"""Efficient softmax approximation.
As described in
`Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
Moustapha Ciss\u00e9, David Grangier, and Herv\u00e9 J\u00e9gou
<https://arxiv.org/abs/1609.04309>`__.
"""
r"""
Adaptive softmax is an approximate strategy for training models with large
output spaces. It is most effective when the label distribution is highly
imbalanced, for example in natural language modelling, where the word
frequency distribution approximately follows the `Zipf's law`_.
Adaptive softmax partitions the labels into several clusters, according to
their frequency. These clusters may contain different number of targets
each.
Additionally, clusters containing less frequent labels assign lower
dimensional embeddings to those labels, which speeds up the computation.
For each minibatch, only clusters for which at least one target is
present are evaluated.
The idea is that the clusters which are accessed frequently
(like the first one, containing most frequent labels), should also be cheap
to compute -- that is, contain a small number of assigned labels.
We highly recommend taking a look at the original paper for more details.
* :attr:`cutoffs` should be an ordered Sequence of integers sorted
in the increasing order.
It controls number of clusters and the partitioning of targets into
clusters. For example setting ``cutoffs = [10, 100, 1000]``
means that first `10` targets will be assigned
to the 'head' of the adaptive softmax, targets `11, 12, ..., 100` will be
assigned to the first cluster, and targets `101, 102, ..., 1000` will be
assigned to the second cluster, while targets
`1001, 1002, ..., n_classes - 1` will be assigned
to the last, third cluster.
* :attr:`div_value` is used to compute the size of each additional cluster,
which is given as
:math:`\left\lfloor\frac{\texttt{in\_features}}{\texttt{div\_value}^{idx}}\right\rfloor`,
where :math:`idx` is the cluster index (with clusters
for less frequent words having larger indices,
and indices starting from :math:`1`).
* :attr:`head_bias` if set to True, adds a bias term to the 'head' of the
adaptive softmax. See paper for details. Set to False in the official
implementation.
.. warning::
Labels passed as inputs to this module should be sorted according to
their frequency. This means that the most frequent label should be
represented by the index `0`, and the least frequent
label should be represented by the index `n_classes - 1`.
.. note::
This module returns a ``NamedTuple`` with ``output``
and ``loss`` fields. See further documentation for details.
.. note::
To compute log-probabilities for all classes, the ``log_prob``
method can be used.
Args:
in_features (int): Number of features in the input tensor
n_classes (int): Number of classes in the dataset
cutoffs (Sequence): Cutoffs used to assign targets to their buckets
div_value (float, optional): value used as an exponent to compute sizes
of the clusters. Default: 4.0
head_bias (bool, optional): If ``True``, adds a bias term to the 'head' of the
adaptive softmax. Default: ``False``
Returns:
``NamedTuple`` with ``output`` and ``loss`` fields:
* **output** is a Tensor of size ``N`` containing computed target
log probabilities for each example
* **loss** is a Scalar representing the computed negative
log likelihood loss
Shape:
- input: :math:`(N, \texttt{in\_features})` or :math:`(\texttt{in\_features})`
- target: :math:`(N)` or :math:`()` where each value satisfies :math:`0 <= \texttt{target[i]} <= \texttt{n\_classes}`
- output1: :math:`(N)` or :math:`()`
- output2: ``Scalar``
.. _Zipf's law: https://en.wikipedia.org/wiki/Zipf%27s_law
"""
)
in_features: int
n_classes: int
cutoffs: list[int]
div_value: float
head_bias: bool
head: Linear
tail: ModuleList
def __init__(
self,
in_features: int,
n_classes: int,
cutoffs: Sequence[int],
div_value: float = 4.0,
head_bias: bool = False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
cutoffs = list(cutoffs)
if len(cutoffs) == 0:
raise ValueError("cutoffs should be a sequence of length larger than 0")
if (
(cutoffs != sorted(cutoffs))
or (min(cutoffs) <= 0)
or (max(cutoffs) > (n_classes - 1))
or (len(set(cutoffs)) != len(cutoffs))
or any(int(c) != c for c in cutoffs)
):
raise ValueError(
"cutoffs should be a sequence of unique, positive "
"integers sorted in an increasing order, where "
"each value is between 1 and n_classes-1"
)
self.in_features = in_features
self.n_classes = n_classes
self.cutoffs = cutoffs + [n_classes]
self.div_value = div_value
self.head_bias = head_bias
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.head = Linear(
self.in_features, self.head_size, bias=self.head_bias, **factory_kwargs
)
self.tail = ModuleList()
for i in range(self.n_clusters):
hsz = int(self.in_features // (self.div_value ** (i + 1)))
osz = self.cutoffs[i + 1] - self.cutoffs[i]
projection = Sequential(
Linear(self.in_features, hsz, bias=False, **factory_kwargs),
Linear(hsz, osz, bias=False, **factory_kwargs),
)
self.tail.append(projection)
def reset_parameters(self) -> None:
"""
Resets parameters based on their initialization used in ``__init__``.
"""
self.head.reset_parameters()
for i2h, h2o in self.tail: # type: ignore[misc]
i2h.reset_parameters() # type: ignore[has-type]
h2o.reset_parameters() # type: ignore[has-type]
def forward(self, input_: Tensor, target_: Tensor) -> _ASMoutput:
"""
Runs the forward pass.
"""
targ_dim = target_.dim()
if targ_dim == 1:
if input_.size(0) != target_.size(0):
raise RuntimeError(
"Input and target should have the same size in the batch dimension."
)
if input_.dim() != 2:
raise RuntimeError(
"1D target tensor expects 2D input tensors, "
"but found inputs with size",
input_.size(),
)
elif targ_dim == 0:
if input_.dim() != 1:
raise RuntimeError(
"0D target tensor expects 1D input tensors, "
"but found inputs with size",
input_.size(),
)
else:
raise RuntimeError(
"0D or 1D target tensor expected, multi-target not supported"
)
is_batched = targ_dim > 0
input = input_ if is_batched else input_.unsqueeze(0)
target = target_ if is_batched else target_.unsqueeze(0)
used_rows = 0
batch_size = target.size(0)
output = input.new_zeros(batch_size)
gather_inds = target.new_empty(batch_size)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
low_idx = cutoff_values[i]
high_idx = cutoff_values[i + 1]
target_mask = (target >= low_idx) & (target < high_idx)
row_indices = target_mask.nonzero().squeeze()
if row_indices.numel() == 0:
continue
if i == 0:
gather_inds.index_copy_(0, row_indices, target[target_mask])
else:
relative_target = target[target_mask] - low_idx
input_subset = input.index_select(0, row_indices)
cluster_output = self.tail[i - 1](input_subset)
cluster_index = self.shortlist_size + i - 1
gather_inds.index_fill_(0, row_indices, cluster_index)
cluster_logprob = F.log_softmax(cluster_output, dim=1)
local_logprob = cluster_logprob.gather(1, relative_target.unsqueeze(1))
output.index_copy_(0, row_indices, local_logprob.squeeze(1))
used_rows += row_indices.numel()
if used_rows != batch_size:
raise RuntimeError(
f"Target values should be in [0, {self.n_classes - 1}], "
f"but values in range [{target.min().item()}, {target.max().item()}] "
"were found. "
)
head_output = self.head(input)
head_logprob = F.log_softmax(head_output, dim=1)
output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze()
loss = (-output).mean()
if not is_batched:
output = output.squeeze(0)
return _ASMoutput(output, loss)
def _get_full_log_prob(self, input, head_output):
"""Given input tensor, and output of ``self.head``, compute the log of the full distribution."""
out = input.new_empty((head_output.size(0), self.n_classes))
head_logprob = F.log_softmax(head_output, dim=1)
out[:, : self.shortlist_size] = head_logprob[:, : self.shortlist_size]
for i, (start_idx, stop_idx) in enumerate(itertools.pairwise(self.cutoffs)):
cluster_output = self.tail[i](input)
cluster_logprob = F.log_softmax(cluster_output, dim=1)
output_logprob = cluster_logprob + head_logprob[
:, self.shortlist_size + i
].unsqueeze(1)
out[:, start_idx:stop_idx] = output_logprob
return out
def log_prob(self, input: Tensor) -> Tensor:
r"""Compute log probabilities for all :math:`\texttt{n\_classes}`.
Args:
input (Tensor): a minibatch of examples
Returns:
log-probabilities of for each class :math:`c`
in range :math:`0 <= c <= \texttt{n\_classes}`, where :math:`\texttt{n\_classes}` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N, \texttt{n\_classes})`
"""
head_output = self.head(input)
return self._get_full_log_prob(input, head_output)
def predict(self, input: Tensor) -> Tensor:
r"""Return the class with the highest probability for each example in the input minibatch.
This is equivalent to ``self.log_prob(input).argmax(dim=1)``, but is more efficient in some cases.
Args:
input (Tensor): a minibatch of examples
Returns:
output (Tensor): a class with the highest probability for each example
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N)`
"""
head_output = self.head(input)
output = torch.argmax(head_output, dim=1)
not_in_shortlist = output >= self.shortlist_size
all_in_shortlist = not (not_in_shortlist.any())
if all_in_shortlist:
return output
elif not_in_shortlist.all():
log_prob = self._get_full_log_prob(input, head_output)
return torch.argmax(log_prob, dim=1)
else:
log_prob = self._get_full_log_prob(
input[not_in_shortlist], head_output[not_in_shortlist]
)
output[not_in_shortlist] = torch.argmax(log_prob, dim=1)
return output
| AdaptiveLogSoftmaxWithLoss |
python | kamyu104__LeetCode-Solutions | Python/extract-kth-character-from-the-rope-tree.py | {
"start": 190,
"end": 621
} | class ____(object):
def getKthCharacter(self, root, k):
"""
:type root: Optional[RopeTreeNode]
:type k: int
:rtype: str
"""
while root.len:
l = max(root.left.len, len(root.left.val)) if root.left else 0
if k <= l:
root = root.left
else:
k -= l
root = root.right
return root.val[k-1]
| Solution |
python | apache__airflow | providers/standard/tests/unit/standard/decorators/test_external_python.py | {
"start": 2646,
"end": 10683
} | class ____:
@pytest.mark.parametrize(
"serializer",
[
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
],
)
def test_with_serializer_works(self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill):
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f():
"""Import cloudpickle/dill to double-check it is installed ."""
import cloudpickle # noqa: F401
import dill # noqa: F401
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
],
)
def test_with_templated_python_serializer(
self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill
):
# add template that produces empty string when rendered
templated_python_with_cloudpickle = venv_python_with_cloudpickle_and_dill + "{{ '' }}"
@task.external_python(python=templated_python_with_cloudpickle, serializer=serializer)
def f():
"""Import cloudpickle/dill to double-check it is installed ."""
import cloudpickle # noqa: F401
import dill # noqa: F401
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
],
)
def test_no_advanced_serializer_installed(self, serializer, dag_maker, venv_python):
@task.external_python(python=venv_python, serializer=serializer)
def f():
pass
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
with pytest.raises(CalledProcessError):
ti.run()
def test_exception_raises_error(self, dag_maker, venv_python):
@task.external_python(python=venv_python)
def f():
raise Exception
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
with pytest.raises(CalledProcessError):
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_with_args(self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill):
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f(a, b, c=False, d=False):
if a == 0 and b == 1 and c and not d:
return True
raise Exception
with dag_maker(serialized=True):
f(0, 1, c=True)
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_return_none(self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill):
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f():
return None
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_nonimported_as_arg(self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill):
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f(_):
return None
with dag_maker(serialized=True):
f(datetime.datetime.now(tz=datetime.timezone.utc))
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_marking_external_python_task_as_setup(
self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill
):
@setup
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f():
return 1
with dag_maker(serialized=True) as dag:
f()
dr = dag_maker.create_dagrun()
assert len(dag.task_group.children) == 1
setup_task = dag.task_group.children["f"]
assert setup_task.is_setup
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_marking_external_python_task_as_teardown(
self, serializer, dag_maker, venv_python_with_cloudpickle_and_dill
):
@teardown
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f():
return 1
with dag_maker(serialized=True) as dag:
f()
dr = dag_maker.create_dagrun()
assert len(dag.task_group.children) == 1
teardown_task = dag.task_group.children["f"]
assert teardown_task.is_teardown
ti = dr.get_task_instances()[0]
ti.run()
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
@pytest.mark.parametrize("on_failure_fail_dagrun", [True, False])
def test_marking_external_python_task_as_teardown_with_on_failure_fail(
self, serializer, dag_maker, on_failure_fail_dagrun, venv_python_with_cloudpickle_and_dill
):
@teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
@task.external_python(python=venv_python_with_cloudpickle_and_dill, serializer=serializer)
def f():
return 1
with dag_maker(serialized=True) as dag:
f()
dr = dag_maker.create_dagrun()
assert len(dag.task_group.children) == 1
teardown_task = dag.task_group.children["f"]
assert teardown_task.is_teardown
assert teardown_task.on_failure_fail_dagrun is on_failure_fail_dagrun
ti = dr.get_task_instances()[0]
ti.run()
| TestExternalPythonDecorator |
python | kamyu104__LeetCode-Solutions | Python/count-subtrees-with-max-distance-between-cities.py | {
"start": 2242,
"end": 3673
} | class ____(object):
def countSubgraphsForEachDiameter(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
def popcount(mask):
count = 0
while mask:
mask &= mask-1
count += 1
return count
def bfs(adj, mask, start):
q = collections.deque([(start, 0)])
lookup = 1<<start
count = popcount(mask)-1
u, d = None, None
while q:
u, d = q.popleft()
for v in adj[u]:
if not (mask&(1<<v)) or (lookup&(1<<v)):
continue
lookup |= 1<<v
count -= 1
q.append((v, d+1))
return count == 0, u, d
def max_distance(n, edges, adj, mask):
is_valid, farthest, _ = bfs(adj, mask, int(math.log(mask&-mask, 2)))
return bfs(adj, mask, farthest)[-1] if is_valid else 0
adj = collections.defaultdict(list)
for u, v in edges:
u -= 1
v -= 1
adj[u].append(v)
adj[v].append(u)
result = [0]*(n-1)
for mask in xrange(1, 2**n):
max_d = max_distance(n, edges, adj, mask)
if max_d-1 >= 0:
result[max_d-1] += 1
return result
| Solution2 |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/Flowchart.py | {
"start": 21603,
"end": 22781
} | class ____(GraphicsObject):
def __init__(self, chart):
GraphicsObject.__init__(self)
self.chart = chart ## chart is an instance of Flowchart()
self.updateTerminals()
def updateTerminals(self):
self.terminals = {}
bounds = self.boundingRect()
inp = self.chart.inputs()
dy = bounds.height() / (len(inp)+1)
y = dy
for n, t in inp.items():
item = t.graphicsItem()
self.terminals[n] = item
item.setParentItem(self)
item.setAnchor(bounds.width(), y)
y += dy
out = self.chart.outputs()
dy = bounds.height() / (len(out)+1)
y = dy
for n, t in out.items():
item = t.graphicsItem()
self.terminals[n] = item
item.setParentItem(self)
item.setAnchor(0, y)
y += dy
def boundingRect(self):
#print "FlowchartGraphicsItem.boundingRect"
return QtCore.QRectF()
def paint(self, p, *args):
#print "FlowchartGraphicsItem.paint"
pass
#p.drawRect(self.boundingRect())
| FlowchartGraphicsItem |
python | huggingface__transformers | src/transformers/models/plbart/modular_plbart.py | {
"start": 17240,
"end": 18678
} | class ____(BartForCausalLM):
@auto_docstring
def forward(**super_kwargs):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, PLBartForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
>>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```"""
super().forward(**super_kwargs)
__all__ = [
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
| PLBartForCausalLM |
python | realpython__materials | inheritance-and-composition/inheritance/productivity.py | {
"start": 409,
"end": 523
} | class ____:
def work(self, hours):
return f"expends {hours} hours doing office paperwork."
| SecretaryRole |
python | PyCQA__pylint | tests/functional/b/base_init_vars.py | {
"start": 100,
"end": 309
} | class ____:
"""A simple base class
"""
def __init__(self):
self.base_var = {}
def met(self):
"""yo"""
def meeting(self, with_):
"""ye"""
return with_
| BaseClass |
python | sympy__sympy | sympy/stats/joint_rv_types.py | {
"start": 10551,
"end": 12400
} | class ____(JointDistribution):
_argnames = ('mu', 'shape_mat', 'dof')
is_Continuous=True
@property
def set(self):
k = self.mu.shape[0]
return S.Reals**k
@staticmethod
def check(mu, sigma, v):
_value_check(mu.shape[0] == sigma.shape[0],
"Size of the location vector and shape matrix are incorrect.")
# check if covariance matrix is positive definite or not.
if not isinstance(sigma, MatrixSymbol):
_value_check(sigma.is_positive_definite,
"The shape matrix must be positive definite. ")
def pdf(self, *args):
mu, sigma = self.mu, self.shape_mat
v = S(self.dof)
k = S(mu.shape[0])
sigma_inv = sigma.inv()
args = ImmutableMatrix(args)
x = args - mu
return gamma((k + v)/2)/(gamma(v/2)*(v*pi)**(k/2)*sqrt(det(sigma)))\
*(1 + 1/v*(x.transpose()*sigma_inv*x)[0])**((-v - k)/2)
def MultivariateT(syms, mu, sigma, v):
"""
Creates a joint random variable with multivariate T-distribution.
Parameters
==========
syms : A symbol/str
For identifying the random variable.
mu : A list/matrix
Representing the location vector
sigma : The shape matrix for the distribution
Examples
========
>>> from sympy.stats import density, MultivariateT
>>> from sympy import Symbol
>>> x = Symbol("x")
>>> X = MultivariateT("x", [1, 1], [[1, 0], [0, 1]], 2)
>>> density(X)(1, 2)
2/(9*pi)
Returns
=======
RandomSymbol
"""
return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v)
#-------------------------------------------------------------------------------
# Multivariate Normal Gamma distribution ---------------------------------------
| MultivariateTDistribution |
python | keon__algorithms | tests/test_dp.py | {
"start": 5262,
"end": 5846
} | class ____(unittest.TestCase):
def test_kfactor(self):
# Test 1
n1 = 4
k1 = 1
self.assertEqual(find_k_factor(n1, k1), 1)
# Test 2
n2 = 7
k2 = 1
self.assertEqual(find_k_factor(n2, k2), 70302)
# Test 3
n3 = 10
k3 = 2
self.assertEqual(find_k_factor(n3, k3), 74357)
# Test 4
n4 = 8
k4 = 2
self.assertEqual(find_k_factor(n4, k4), 53)
# Test 5
n5 = 9
k5 = 1
self.assertEqual(find_k_factor(n5, k5), 71284044)
| Test_dp_K_Factor |
python | pypa__setuptools | setuptools/_vendor/backports/tarfile/__init__.py | {
"start": 10300,
"end": 10389
} | class ____(HeaderError):
"""Exception for invalid headers."""
pass
| InvalidHeaderError |
python | huggingface__transformers | src/transformers/models/sew/modular_sew.py | {
"start": 1582,
"end": 1650
} | class ____(Wav2Vec2LayerNormConvLayer):
pass
| SEWLayerNormConvLayer |
python | walkccc__LeetCode | solutions/1589. Maximum Sum Obtained of Any Permutation/1589.py | {
"start": 0,
"end": 514
} | class ____:
def maxSumRangeQuery(self, nums: list[int], requests: list[list[int]]) -> int:
MOD = 1_000_000_007
ans = 0
# count[i] := the number of times nums[i] has been requested
count = [0] * len(nums)
for start, end in requests:
count[start] += 1
if end + 1 < len(nums):
count[end + 1] -= 1
for i in range(1, len(nums)):
count[i] += count[i - 1]
for num, c in zip(sorted(nums), sorted(count)):
ans += num * c
ans %= MOD
return ans
| Solution |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 56182,
"end": 57411
} | class ____(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.source_port = socket_helper.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
| SourceAddressTest |
python | aimacode__aima-python | deep_learning4e.py | {
"start": 2257,
"end": 2420
} | class ____(Activation):
def function(self, x):
return np.exp(x) / np.sum(np.exp(x))
def derivative(self, x):
return np.ones_like(x)
| SoftMax |
python | Textualize__textual | src/textual/command.py | {
"start": 14631,
"end": 14911
} | class ____(Input):
"""The command palette input control."""
DEFAULT_CSS = """
CommandInput, CommandInput:focus {
border: blank;
width: 1fr;
padding-left: 0;
background: transparent;
background-tint: 0%;
}
"""
| CommandInput |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 1408,
"end": 1642
} | class ____(RequestHandler):
def prepare(self):
self.write("redirects can have bodies too")
self.redirect(
self.get_argument("url"), status=int(self.get_argument("status", "302"))
)
| RedirectHandler |
python | ipython__ipython | IPython/core/completer.py | {
"start": 47365,
"end": 63607
} | class ____(enum.Flag):
"""Represent state of the key match in context of other possible matches.
- given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
- given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there is no tuple members to add beyond `'b'`.
- given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
- given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM & END_OF_TUPLE}`
"""
BASELINE = 0
END_OF_ITEM = enum.auto()
END_OF_TUPLE = enum.auto()
IN_TUPLE = enum.auto()
def _parse_tokens(c):
"""Parse tokens even if there is an error."""
tokens = []
token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
while True:
try:
tokens.append(next(token_generator))
except tokenize.TokenError:
return tokens
except StopIteration:
return tokens
def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
"""Match any valid Python numeric literal in a prefix of dictionary keys.
References:
- https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
- https://docs.python.org/3/library/tokenize.html
"""
if prefix[-1].isspace():
# if user typed a space we do not have anything to complete
# even if there was a valid number token before
return None
tokens = _parse_tokens(prefix)
rev_tokens = reversed(tokens)
skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
number = None
for token in rev_tokens:
if token.type in skip_over:
continue
if number is None:
if token.type == tokenize.NUMBER:
number = token.string
continue
else:
# we did not match a number
return None
if token.type == tokenize.OP:
if token.string == ",":
break
if token.string in {"+", "-"}:
number = token.string + number
else:
return None
return number
_INT_FORMATS = {
"0b": bin,
"0o": oct,
"0x": hex,
}
def match_dict_keys(
keys: list[Union[str, bytes, tuple[Union[str, bytes], ...]]],
prefix: str,
delims: str,
extra_prefix: Optional[tuple[Union[str, bytes], ...]] = None,
) -> tuple[str, int, dict[str, _DictKeyState]]:
"""Used by dict_key_matches, matching the prefix to a list of keys
Parameters
----------
keys
list of keys in dictionary currently being completed.
prefix
Part of the text already typed by the user. E.g. `mydict[b'fo`
delims
String of delimiters to consider when finding the current key.
extra_prefix : optional
Part of the text already typed in multi-key index cases. E.g. for
`mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
Returns
-------
A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
``quote`` being the quote that need to be used to close current string.
``token_start`` the position where the replacement should start occurring,
``matches`` a dictionary of replacement/completion keys on keys and values
indicating whether the state.
"""
prefix_tuple = extra_prefix if extra_prefix else ()
prefix_tuple_size = sum(
[
# for pandas, do not count slices as taking space
not isinstance(k, slice)
for k in prefix_tuple
]
)
text_serializable_types = (str, bytes, int, float, slice)
def filter_prefix_tuple(key):
# Reject too short keys
if len(key) <= prefix_tuple_size:
return False
# Reject keys which cannot be serialised to text
for k in key:
if not isinstance(k, text_serializable_types):
return False
# Reject keys that do not match the prefix
for k, pt in zip(key, prefix_tuple):
if k != pt and not isinstance(pt, slice):
return False
# All checks passed!
return True
filtered_key_is_final: dict[
Union[str, bytes, int, float], _DictKeyState
] = defaultdict(lambda: _DictKeyState.BASELINE)
for k in keys:
# If at least one of the matches is not final, mark as undetermined.
# This can happen with `d = {111: 'b', (111, 222): 'a'}` where
# `111` appears final on first match but is not final on the second.
if isinstance(k, tuple):
if filter_prefix_tuple(k):
key_fragment = k[prefix_tuple_size]
filtered_key_is_final[key_fragment] |= (
_DictKeyState.END_OF_TUPLE
if len(k) == prefix_tuple_size + 1
else _DictKeyState.IN_TUPLE
)
elif prefix_tuple_size > 0:
# we are completing a tuple but this key is not a tuple,
# so we should ignore it
pass
else:
if isinstance(k, text_serializable_types):
filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
filtered_keys = filtered_key_is_final.keys()
if not prefix:
return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
quote_match = re.search("(?:\"|')", prefix)
is_user_prefix_numeric = False
if quote_match:
quote = quote_match.group()
valid_prefix = prefix + quote
try:
prefix_str = literal_eval(valid_prefix)
except Exception:
return "", 0, {}
else:
# If it does not look like a string, let's assume
# we are dealing with a number or variable.
number_match = _match_number_in_dict_key_prefix(prefix)
# We do not want the key matcher to suggest variable names so we yield:
if number_match is None:
# The alternative would be to assume that user forgort the quote
# and if the substring matches, suggest adding it at the start.
return "", 0, {}
prefix_str = number_match
is_user_prefix_numeric = True
quote = ""
pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
token_match = re.search(pattern, prefix, re.UNICODE)
assert token_match is not None # silence mypy
token_start = token_match.start()
token_prefix = token_match.group()
matched: dict[str, _DictKeyState] = {}
str_key: Union[str, bytes]
for key in filtered_keys:
if isinstance(key, (int, float)):
# User typed a number but this key is not a number.
if not is_user_prefix_numeric:
continue
str_key = str(key)
if isinstance(key, int):
int_base = prefix_str[:2].lower()
# if user typed integer using binary/oct/hex notation:
if int_base in _INT_FORMATS:
int_format = _INT_FORMATS[int_base]
str_key = int_format(key)
else:
# User typed a string but this key is a number.
if is_user_prefix_numeric:
continue
str_key = key
try:
if not str_key.startswith(prefix_str):
continue
except (AttributeError, TypeError, UnicodeError):
# Python 3+ TypeError on b'a'.startswith('a') or vice-versa
continue
# reformat remainder of key to begin with prefix
rem = str_key[len(prefix_str) :]
# force repr wrapped in '
rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
if quote == '"':
# The entered prefix is quoted with ",
# but the match is quoted with '.
# A contained " hence needs escaping for comparison:
rem_repr = rem_repr.replace('"', '\\"')
# then reinsert prefix from start of token
match = "%s%s" % (token_prefix, rem_repr)
matched[match] = filtered_key_is_final[key]
return quote, token_start, matched
def cursor_to_position(text:str, line:int, column:int)->int:
"""
Convert the (line,column) position of the cursor in text to an offset in a
string.
Parameters
----------
text : str
The text in which to calculate the cursor offset
line : int
Line of the cursor; 0-indexed
column : int
Column of the cursor 0-indexed
Returns
-------
Position of the cursor in ``text``, 0-indexed.
See Also
--------
position_to_cursor : reciprocal of this function
"""
lines = text.split('\n')
assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
return sum(len(line) + 1 for line in lines[:line]) + column
def position_to_cursor(text: str, offset: int) -> tuple[int, int]:
"""
Convert the position of the cursor in text (0 indexed) to a line
number(0-indexed) and a column number (0-indexed) pair
Position should be a valid position in ``text``.
Parameters
----------
text : str
The text in which to calculate the cursor offset
offset : int
Position of the cursor in ``text``, 0-indexed.
Returns
-------
(line, column) : (int, int)
Line of the cursor; 0-indexed, column of the cursor 0-indexed
See Also
--------
cursor_to_position : reciprocal of this function
"""
assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
before = text[:offset]
blines = before.split('\n') # ! splitnes trim trailing \n
line = before.count('\n')
col = len(blines[-1])
return line, col
def _safe_isinstance(obj, module, class_name, *attrs):
"""Checks if obj is an instance of module.class_name if loaded
"""
if module in sys.modules:
m = sys.modules[module]
for attr in [class_name, *attrs]:
m = getattr(m, attr)
return isinstance(obj, m)
@context_matcher()
def back_unicode_name_matcher(context: CompletionContext):
"""Match Unicode characters back to Unicode name
Same as :any:`back_unicode_name_matches`, but adopted to new Matcher API.
"""
fragment, matches = back_unicode_name_matches(context.text_until_cursor)
return _convert_matcher_v1_result_to_v2(
matches, type="unicode", fragment=fragment, suppress_if_matches=True
)
def back_unicode_name_matches(text: str) -> tuple[str, Sequence[str]]:
"""Match Unicode characters back to Unicode name
This does ``☃`` -> ``\\snowman``
Note that snowman is not a valid python3 combining character but will be expanded.
Though it will not recombine back to the snowman character by the completion machinery.
This will not either back-complete standard sequences like \\n, \\b ...
.. deprecated:: 8.6
You can use :meth:`back_unicode_name_matcher` instead.
Returns
=======
Return a tuple with two elements:
- The Unicode character that was matched (preceded with a backslash), or
empty string,
- a sequence (of 1), name for the match Unicode character, preceded by
backslash, or empty if no match.
"""
if len(text)<2:
return '', ()
maybe_slash = text[-2]
if maybe_slash != '\\':
return '', ()
char = text[-1]
# no expand on quote for completion in strings.
# nor backcomplete standard ascii keys
if char in string.ascii_letters or char in ('"',"'"):
return '', ()
try :
unic = unicodedata.name(char)
return '\\'+char,('\\'+unic,)
except KeyError:
pass
return '', ()
@context_matcher()
def back_latex_name_matcher(context: CompletionContext) -> SimpleMatcherResult:
"""Match latex characters back to unicode name
This does ``\\ℵ`` -> ``\\aleph``
"""
text = context.text_until_cursor
no_match = {
"completions": [],
"suppress": False,
}
if len(text)<2:
return no_match
maybe_slash = text[-2]
if maybe_slash != '\\':
return no_match
char = text[-1]
# no expand on quote for completion in strings.
# nor backcomplete standard ascii keys
if char in string.ascii_letters or char in ('"',"'"):
return no_match
try :
latex = reverse_latex_symbol[char]
# '\\' replace the \ as well
return {
"completions": [SimpleCompletion(text=latex, type="latex")],
"suppress": True,
"matched_fragment": "\\" + char,
}
except KeyError:
pass
return no_match
def _formatparamchildren(parameter) -> str:
"""
Get parameter name and value from Jedi Private API
Jedi does not expose a simple way to get `param=value` from its API.
Parameters
----------
parameter
Jedi's function `Param`
Returns
-------
A string like 'a', 'b=1', '*args', '**kwargs'
"""
description = parameter.description
if not description.startswith('param '):
raise ValueError('Jedi function parameter description have change format.'
'Expected "param ...", found %r".' % description)
return description[6:]
def _make_signature(completion)-> str:
"""
Make the signature from a jedi completion
Parameters
----------
completion : jedi.Completion
object does not complete a function type
Returns
-------
a string consisting of the function signature, with the parenthesis but
without the function name. example:
`(a, *args, b=1, **kwargs)`
"""
# it looks like this might work on jedi 0.17
if hasattr(completion, 'get_signatures'):
signatures = completion.get_signatures()
if not signatures:
return '(?)'
c0 = completion.get_signatures()[0]
return '('+c0.to_string().split('(', maxsplit=1)[1]
return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
for p in signature.defined_names()) if f])
_CompleteResult = dict[str, MatcherResult]
DICT_MATCHER_REGEX = re.compile(
r"""(?x)
( # match dict-referring - or any get item object - expression
.+
)
\[ # open bracket
\s* # and optional whitespace
# Capture any number of serializable objects (e.g. "a", "b", 'c')
# and slices
((?:(?:
(?: # closed string
[uUbB]? # string prefix (r not handled)
(?:
'(?:[^']|(?<!\\)\\')*'
|
"(?:[^"]|(?<!\\)\\")*"
)
)
|
# capture integers and slices
(?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
|
# integer in bin/hex/oct notation
0[bBxXoO]_?(?:\w|\d)+
)
\s*,\s*
)*)
((?:
(?: # unclosed string
[uUbB]? # string prefix (r not handled)
(?:
'(?:[^']|(?<!\\)\\')*
|
"(?:[^"]|(?<!\\)\\")*
)
)
|
# unfinished integer
(?:[-+]?\d+)
|
# integer in bin/hex/oct notation
0[bBxXoO]_?(?:\w|\d)+
)
)?
$
"""
)
def _convert_matcher_v1_result_to_v2_no_no(
matches: Sequence[str],
type: str,
) -> SimpleMatcherResult:
"""same as _convert_matcher_v1_result_to_v2 but fragment=None, and suppress_if_matches is False by construction"""
return SimpleMatcherResult(
completions=[SimpleCompletion(text=match, type=type) for match in matches],
suppress=False,
)
def _convert_matcher_v1_result_to_v2(
matches: Sequence[str],
type: str,
fragment: Optional[str] = None,
suppress_if_matches: bool = False,
) -> SimpleMatcherResult:
"""Utility to help with transition"""
result = {
"completions": [SimpleCompletion(text=match, type=type) for match in matches],
"suppress": (True if matches else False) if suppress_if_matches else False,
}
if fragment is not None:
result["matched_fragment"] = fragment
return cast(SimpleMatcherResult, result)
| _DictKeyState |
python | run-llama__llama_index | llama-index-core/llama_index/core/data_structs/data_structs.py | {
"start": 451,
"end": 974
} | class ____(DataClassJsonMixin):
"""A base data struct for a LlamaIndex."""
index_id: str = field(default_factory=lambda: str(uuid.uuid4()))
summary: Optional[str] = None
def get_summary(self) -> str:
"""Get text summary."""
if self.summary is None:
raise ValueError("summary field of the index_struct not set.")
return self.summary
@classmethod
@abstractmethod
def get_type(cls) -> IndexStructType:
"""Get index struct type."""
@dataclass
| IndexStruct |
python | prompt-toolkit__python-prompt-toolkit | examples/prompts/multiline-autosuggest.py | {
"start": 2768,
"end": 5955
} | class ____(Processor):
def __init__(self, style: str = "class:auto-suggestion") -> None:
self.style = style
def apply_transformation(self, ti: TransformationInput) -> Transformation:
# a convenient noop transformation that does nothing.
noop = Transformation(fragments=ti.fragments)
# We get out of the way if the prompt is only one line, and let prompt_toolkit handle the rest.
if ti.document.line_count == 1:
return noop
# first everything before the current line is unchanged.
if ti.lineno < ti.document.cursor_position_row:
return noop
buffer = ti.buffer_control.buffer
if not buffer.suggestion or not ti.document.is_cursor_at_the_end_of_line:
return noop
# compute the number delta between the current cursor line and line we are transforming
# transformed line can either be suggestions, or an existing line that is shifted.
delta = ti.lineno - ti.document.cursor_position_row
# convert the suggestion into a list of lines
suggestions = buffer.suggestion.text.splitlines()
if not suggestions:
return noop
if delta == 0:
# append suggestion to current line
suggestion = suggestions[0]
return Transformation(fragments=ti.fragments + [(self.style, suggestion)])
elif delta < len(suggestions):
# append a line with the nth line of the suggestion
suggestion = suggestions[delta]
assert "\n" not in suggestion
return Transformation([(self.style, suggestion)])
else:
# return the line that is by delta-1 suggestion (first suggestion does not shifts)
shift = ti.lineno - len(suggestions) + 1
return Transformation(ti.get_line(shift))
def main():
# Create some history first. (Easy for testing.)
autosuggest = FakeLLMAutoSuggest()
# Print help.
print("This CLI has fish-style auto-suggestion enabled across multiple lines.")
print("This will try to complete the universal declaration of human rights.")
print("")
print(" " + "\n ".join(universal_declaration_of_human_rights))
print("")
print("Add a few new lines to see multiline completion, and start typing.")
print("Press Control-C to retry. Control-D to exit.")
print()
session = PromptSession(
auto_suggest=autosuggest,
enable_history_search=False,
reserve_space_for_menu=5,
multiline=True,
prompt_continuation="... ",
input_processors=[
ConditionalProcessor(
processor=AppendMultilineAutoSuggestionInAnyLine(),
filter=HasFocus(DEFAULT_BUFFER) & ~IsDone(),
),
],
)
while True:
try:
text = session.prompt(
"Say something (Esc-enter : accept, enter : new line): "
)
except KeyboardInterrupt:
pass # Ctrl-C pressed. Try again.
else:
break
print(f"You said: {text}")
if __name__ == "__main__":
main()
| AppendMultilineAutoSuggestionInAnyLine |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1084568,
"end": 1089353
} | class ____(sgqlc.types.Type, Node, UniformResourceLocatable, RequirableByPullRequest):
"""A check run."""
__schema__ = github_schema
__field_names__ = (
"annotations",
"check_suite",
"completed_at",
"conclusion",
"database_id",
"deployment",
"details_url",
"external_id",
"name",
"pending_deployment_request",
"permalink",
"repository",
"started_at",
"status",
"steps",
"summary",
"text",
"title",
)
annotations = sgqlc.types.Field(
CheckAnnotationConnection,
graphql_name="annotations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The check run's annotations
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
check_suite = sgqlc.types.Field(sgqlc.types.non_null("CheckSuite"), graphql_name="checkSuite")
"""The check suite that this run is a part of."""
completed_at = sgqlc.types.Field(DateTime, graphql_name="completedAt")
"""Identifies the date and time when the check run was completed."""
conclusion = sgqlc.types.Field(CheckConclusionState, graphql_name="conclusion")
"""The conclusion of the check run."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
deployment = sgqlc.types.Field("Deployment", graphql_name="deployment")
"""The corresponding deployment for this job, if any"""
details_url = sgqlc.types.Field(URI, graphql_name="detailsUrl")
"""The URL from which to find full details of the check run on the
integrator's site.
"""
external_id = sgqlc.types.Field(String, graphql_name="externalId")
"""A reference for the check run on the integrator's system."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the check for this check run."""
pending_deployment_request = sgqlc.types.Field(DeploymentRequest, graphql_name="pendingDeploymentRequest")
"""Information about a pending deployment, if any, in this check run"""
permalink = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="permalink")
"""The permalink to the check run summary."""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""The repository associated with this check run."""
started_at = sgqlc.types.Field(DateTime, graphql_name="startedAt")
"""Identifies the date and time when the check run was started."""
status = sgqlc.types.Field(sgqlc.types.non_null(CheckStatusState), graphql_name="status")
"""The current status of the check run."""
steps = sgqlc.types.Field(
CheckStepConnection,
graphql_name="steps",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("number", sgqlc.types.Arg(Int, graphql_name="number", default=None)),
)
),
)
"""The check run's steps
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `number` (`Int`): Step number
"""
summary = sgqlc.types.Field(String, graphql_name="summary")
"""A string representing the check run's summary"""
text = sgqlc.types.Field(String, graphql_name="text")
"""A string representing the check run's text"""
title = sgqlc.types.Field(String, graphql_name="title")
"""A string representing the check run"""
| CheckRun |
python | keras-team__keras | keras/src/backend/common/symbolic_scope.py | {
"start": 135,
"end": 683
} | class ____:
"""Scope to indicate the symbolic stage."""
def __enter__(self):
self.original_scope = get_symbolic_scope()
global_state.set_global_attribute("symbolic_scope", self)
return self
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute("symbolic_scope", self.original_scope)
def in_symbolic_scope():
return global_state.get_global_attribute("symbolic_scope") is not None
def get_symbolic_scope():
return global_state.get_global_attribute("symbolic_scope")
| SymbolicScope |
python | pypa__packaging | src/packaging/specifiers.py | {
"start": 1818,
"end": 3537
} | class ____(metaclass=abc.ABCMeta):
__slots__ = ()
@abc.abstractmethod
def __str__(self) -> str:
"""
Returns the str representation of this Specifier-like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Returns a hash value for this Specifier-like object.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier-like
objects are equal.
:param other: The other object to check against.
"""
@property
@abc.abstractmethod
def prereleases(self) -> bool | None:
"""Whether or not pre-releases as a whole are allowed.
This can be set to either ``True`` or ``False`` to explicitly enable or disable
prereleases or it can be set to ``None`` (the default) to use default semantics.
"""
@prereleases.setter # noqa: B027
def prereleases(self, value: bool) -> None:
"""Setter for :attr:`prereleases`.
:param value: The value to set.
"""
@abc.abstractmethod
def contains(self, item: str, prereleases: bool | None = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
) -> Iterator[UnparsedVersionVar]:
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
| BaseSpecifier |
python | django__django | tests/forms_tests/field_tests/test_charfield.py | {
"start": 216,
"end": 6444
class ____(FormFieldAssertionsMixin, SimpleTestCase):
    """Behaviour tests for forms.CharField: cleaning/coercion, min/max length
    validation, widget attributes, whitespace stripping, and null-character
    rejection."""

    def test_charfield_1(self):
        # Default field: required, no length bounds; non-strings are coerced
        # via str() before validation.
        f = CharField()
        self.assertEqual("1", f.clean(1))
        self.assertEqual("hello", f.clean("hello"))
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(None)
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean("")
        self.assertEqual("[1, 2, 3]", f.clean([1, 2, 3]))
        self.assertIsNone(f.max_length)
        self.assertIsNone(f.min_length)

    def test_charfield_2(self):
        # Optional field: empty inputs (None, "") normalise to "".
        f = CharField(required=False)
        self.assertEqual("1", f.clean(1))
        self.assertEqual("hello", f.clean("hello"))
        self.assertEqual("", f.clean(None))
        self.assertEqual("", f.clean(""))
        self.assertEqual("[1, 2, 3]", f.clean([1, 2, 3]))
        self.assertIsNone(f.max_length)
        self.assertIsNone(f.min_length)

    def test_charfield_3(self):
        # max_length enforcement.
        f = CharField(max_length=10, required=False)
        self.assertEqual("12345", f.clean("12345"))
        self.assertEqual("1234567890", f.clean("1234567890"))
        msg = "'Ensure this value has at most 10 characters (it has 11).'"
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("1234567890a")
        self.assertEqual(f.max_length, 10)
        self.assertIsNone(f.min_length)

    def test_charfield_4(self):
        # min_length enforcement on an optional field; "" is still allowed.
        f = CharField(min_length=10, required=False)
        self.assertEqual("", f.clean(""))
        msg = "'Ensure this value has at least 10 characters (it has 5).'"
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("12345")
        self.assertEqual("1234567890", f.clean("1234567890"))
        self.assertEqual("1234567890a", f.clean("1234567890a"))
        self.assertIsNone(f.max_length)
        self.assertEqual(f.min_length, 10)

    def test_charfield_5(self):
        # min_length combined with required: "" fails the required check first.
        f = CharField(min_length=10, required=True)
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean("")
        msg = "'Ensure this value has at least 10 characters (it has 5).'"
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("12345")
        self.assertEqual("1234567890", f.clean("1234567890"))
        self.assertEqual("1234567890a", f.clean("1234567890a"))
        self.assertIsNone(f.max_length)
        self.assertEqual(f.min_length, 10)

    def test_charfield_length_not_int(self):
        """
        Setting min_length or max_length to something that is not a number
        raises an exception.
        """
        with self.assertRaises(ValueError):
            CharField(min_length="a")
        with self.assertRaises(ValueError):
            CharField(max_length="a")
        msg = "__init__() takes 1 positional argument but 2 were given"
        with self.assertRaisesMessage(TypeError, msg):
            CharField("a")

    def test_charfield_widget_attrs(self):
        """
        CharField.widget_attrs() always returns a dictionary and includes
        minlength/maxlength if min_length/max_length are defined on the field
        and the widget is not hidden.
        """
        # Return an empty dictionary if max_length and min_length are both
        # None.
        f = CharField()
        self.assertEqual(f.widget_attrs(TextInput()), {})
        self.assertEqual(f.widget_attrs(Textarea()), {})
        # Return a maxlength attribute equal to max_length.
        f = CharField(max_length=10)
        self.assertEqual(f.widget_attrs(TextInput()), {"maxlength": "10"})
        self.assertEqual(f.widget_attrs(PasswordInput()), {"maxlength": "10"})
        self.assertEqual(f.widget_attrs(Textarea()), {"maxlength": "10"})
        # Return a minlength attribute equal to min_length.
        f = CharField(min_length=5)
        self.assertEqual(f.widget_attrs(TextInput()), {"minlength": "5"})
        self.assertEqual(f.widget_attrs(PasswordInput()), {"minlength": "5"})
        self.assertEqual(f.widget_attrs(Textarea()), {"minlength": "5"})
        # Return both maxlength and minlength when both max_length and
        # min_length are set.
        f = CharField(max_length=10, min_length=5)
        self.assertEqual(
            f.widget_attrs(TextInput()), {"maxlength": "10", "minlength": "5"}
        )
        self.assertEqual(
            f.widget_attrs(PasswordInput()), {"maxlength": "10", "minlength": "5"}
        )
        self.assertEqual(
            f.widget_attrs(Textarea()), {"maxlength": "10", "minlength": "5"}
        )
        self.assertEqual(f.widget_attrs(HiddenInput()), {})

    def test_charfield_strip(self):
        """
        Values have whitespace stripped but not if strip=False.
        """
        f = CharField()
        self.assertEqual(f.clean(" 1"), "1")
        self.assertEqual(f.clean("1 "), "1")
        f = CharField(strip=False)
        self.assertEqual(f.clean(" 1"), " 1")
        self.assertEqual(f.clean("1 "), "1 ")

    def test_strip_before_checking_empty(self):
        """
        A whitespace-only value, ' ', is stripped to an empty string and then
        converted to the empty value, None.
        """
        f = CharField(required=False, empty_value=None)
        self.assertIsNone(f.clean(" "))

    def test_clean_non_string(self):
        """CharField.clean() calls str(value) before stripping it."""

        class StringWrapper:
            def __init__(self, v):
                self.v = v

            def __str__(self):
                return self.v

        value = StringWrapper(" ")
        f1 = CharField(required=False, empty_value=None)
        self.assertIsNone(f1.clean(value))
        f2 = CharField(strip=False)
        self.assertEqual(f2.clean(value), " ")

    def test_charfield_disabled(self):
        f = CharField(disabled=True)
        self.assertWidgetRendersTo(
            f, '<input type="text" name="f" id="id_f" disabled required>'
        )

    def test_null_characters_prohibited(self):
        f = CharField()
        msg = "Null characters are not allowed."
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("\x00something")
| CharFieldTest |
python | catalyst-team__catalyst | catalyst/contrib/layers/curricularface.py | {
"start": 82,
"end": 3902
} | class ____(nn.Module):
"""Implementation of
`CurricularFace: Adaptive Curriculum Learning\
Loss for Deep Face Recognition`_.
.. _CurricularFace\: Adaptive Curriculum Learning\
Loss for Deep Face Recognition:
https://arxiv.org/abs/2004.00288
Official `pytorch implementation`_.
.. _pytorch implementation:
https://github.com/HuangYG123/CurricularFace
Args:
in_features: size of each input sample.
out_features: size of each output sample.
s: norm of input feature.
Default: ``64.0``.
m: margin.
Default: ``0.5``.
Shape:
- Input: :math:`(batch, H_{in})` where
:math:`H_{in} = in\_features`.
- Output: :math:`(batch, H_{out})` where
:math:`H_{out} = out\_features`.
Example:
>>> layer = CurricularFace(5, 10, s=1.31, m=0.5)
>>> loss_fn = nn.CrosEntropyLoss()
>>> embedding = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(10)
>>> output = layer(embedding, target)
>>> loss = loss_fn(output, target)
>>> self.engine.backward(loss)
""" # noqa: RST215
def __init__( # noqa: D107
self, in_features: int, out_features: int, s: float = 64.0, m: float = 0.5
):
super(CurricularFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.m = m
self.s = s
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.threshold = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
self.weight = nn.Parameter(torch.Tensor(in_features, out_features))
self.register_buffer("t", torch.zeros(1))
nn.init.normal_(self.weight, std=0.01)
def __repr__(self) -> str: # noqa: D105
rep = (
"CurricularFace("
f"in_features={self.in_features},"
f"out_features={self.out_features},"
f"m={self.m},s={self.s}"
")"
)
return rep
def forward(
self, input: torch.Tensor, label: torch.LongTensor = None
) -> torch.Tensor:
"""
Args:
input: input features,
expected shapes ``BxF`` where ``B``
is batch dimension and ``F`` is an
input feature dimension.
label: target classes,
expected shapes ``B`` where
``B`` is batch dimension.
If `None` then will be returned
projection on centroids.
Default is `None`.
Returns:
tensor (logits) with shapes ``BxC``
where ``C`` is a number of classes.
"""
cos_theta = torch.mm(F.normalize(input), F.normalize(self.weight, dim=0))
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
if label is None:
return cos_theta
target_logit = cos_theta[torch.arange(0, input.size(0)), label].view(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
cos_theta_m = (
target_logit * self.cos_m - sin_theta * self.sin_m
) # cos(target+margin)
mask = cos_theta > cos_theta_m
final_target_logit = torch.where(
target_logit > self.threshold, cos_theta_m, target_logit - self.mm
)
hard_example = cos_theta[mask]
with torch.no_grad():
self.t = target_logit.mean() * 0.01 + (1 - 0.01) * self.t
cos_theta[mask] = hard_example * (self.t + hard_example)
cos_theta.scatter_(1, label.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.s
return output
__all__ = ["CurricularFace"]
| CurricularFace |
python | langchain-ai__langchain | libs/core/langchain_core/prompt_values.py | {
"start": 2007,
"end": 2699
class ____(PromptValue):
    """Prompt value backed by a sequence of chat messages.

    A prompt value variant that is constructed from messages rather than
    from a plain string.
    """

    messages: Sequence[BaseMessage]
    """List of messages."""

    def to_string(self) -> str:
        """Render the message sequence as a single buffer string."""
        return get_buffer_string(self.messages)

    def to_messages(self) -> list[BaseMessage]:
        """Return the messages as a plain list."""
        return [*self.messages]

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "prompts", "chat"]`
        """
        return ["langchain", "prompts", "chat"]
| ChatPromptValue |
python | pytorch__pytorch | test/test_jit_disabled.py | {
"start": 1818,
"end": 2113
} | class ____(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
pass
AModule()
print("Didn't throw exception")
"""
self.compare_enabled_disabled(_program_string)
def test_recursive_script(self):
_program_string = """
import torch
| AModule |
python | pytorch__pytorch | .ci/pytorch/smoke_test/smoke_test.py | {
"start": 1237,
"end": 18412
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
output = self.fc1(x)
return output
def load_json_from_basedir(filename: str):
    """Load and parse a JSON file located under the module-level BASE_DIR.

    Both a missing file and malformed JSON are re-raised as ImportError so
    callers treat them like a missing module.
    """
    try:
        with open(BASE_DIR / filename) as fptr:
            return json.load(fptr)
    except FileNotFoundError as exc:
        # NOTE(review): "(unknown)" looks like a scrubbed placeholder — the
        # message presumably interpolated {filename} originally; confirm.
        raise ImportError(f"File (unknown) not found error: {exc.strerror}") from exc
    except json.JSONDecodeError as exc:
        raise ImportError(f"Invalid JSON (unknown)") from exc
def read_release_matrix():
    """Return the parsed release_matrix.json (package name -> expected version)."""
    return load_json_from_basedir("release_matrix.json")
def test_numpy():
    """Smoke-check torch/numpy interop; silently skipped when numpy is absent."""
    try:
        import numpy as np

        torch.tensor(np.arange(5))
    except ImportError:
        print("Numpy check skipped. Numpy is not installed.")
def check_version(package: str) -> None:
    """Validate installed torch (and, for release builds, companion package)
    versions against the expected release matrix or stable version.

    Relies on module-level globals: channel, MODULES (and env vars
    RELEASE_VERSION / MATRIX_STABLE_VERSION).
    """
    release_version = os.getenv("RELEASE_VERSION")
    # if release_version is specified, use it to validate the packages
    if release_version:
        release_matrix = read_release_matrix()
        stable_version = release_matrix["torch"]
    else:
        stable_version = os.getenv("MATRIX_STABLE_VERSION")
    # only makes sense to check nightly package where dates are known
    if channel == "nightly":
        check_nightly_binaries_date(package)
    elif stable_version is not None:
        if not torch.__version__.startswith(stable_version):
            raise RuntimeError(
                f"Torch version mismatch, expected {stable_version} for channel {channel}. But its {torch.__version__}"
            )
    if release_version and package == "all":
        # Cross-check every companion module (e.g. torchvision) too.
        for module in MODULES:
            imported_module = importlib.import_module(module["name"])
            module_version = imported_module.__version__
            if not module_version.startswith(release_matrix[module["name"]]):
                raise RuntimeError(
                    f"{module['name']} version mismatch, expected: \
                    {release_matrix[module['name']]} for channel {channel}. But its {module_version}"
                )
            else:
                print(
                    f"{module['name']} version actual: {module_version} expected: \
                    {release_matrix[module['name']]} for channel {channel}."
                )
    else:
        print(f"Skip version check for channel {channel} as stable version is None")
def check_nightly_binaries_date(package: str) -> None:
    """Fail if the nightly torch (and, for package == "all", companion module)
    builds are older than NIGHTLY_ALLOWED_DELTA days.

    Parses the "devYYYYMMDD" segment of each module's __version__.
    """
    from datetime import datetime

    format_dt = "%Y%m%d"
    date_t_str = re.findall("dev\\d+", torch.__version__)
    date_t_delta = datetime.now() - datetime.strptime(date_t_str[0][3:], format_dt)
    if date_t_delta.days >= NIGHTLY_ALLOWED_DELTA:
        raise RuntimeError(
            f"the binaries are from {date_t_str} and are more than {NIGHTLY_ALLOWED_DELTA} days old!"
        )
    if package == "all":
        for module in MODULES:
            imported_module = importlib.import_module(module["name"])
            module_version = imported_module.__version__
            date_m_str = re.findall("dev\\d+", module_version)
            date_m_delta = datetime.now() - datetime.strptime(
                date_m_str[0][3:], format_dt
            )
            print(f"Nightly date check for {module['name']} version {module_version}")
            if date_m_delta.days > NIGHTLY_ALLOWED_DELTA:
                raise RuntimeError(
                    f"Expected {module['name']} to be less then {NIGHTLY_ALLOWED_DELTA} days. But its {date_m_delta}"
                )
def test_cuda_runtime_errors_captured() -> None:
    """Verify that an async CUDA assertion failure surfaces as a Python
    RuntimeError mentioning CUDA; any other RuntimeError is re-raised."""
    cuda_exception_missed = True
    try:
        print("Testing test_cuda_runtime_errors_captured")
        # _assert_async on a zero tensor triggers a device-side assert.
        torch._assert_async(torch.tensor(0, device="cuda"))
        torch._assert_async(torch.tensor(0 + 0j, device="cuda"))
    except RuntimeError as e:
        if re.search("CUDA", f"{e}"):
            print(f"Caught CUDA exception with success: {e}")
            cuda_exception_missed = False
        else:
            raise e
    if cuda_exception_missed:
        raise RuntimeError("Expected CUDA RuntimeError but have not received!")
def test_cuda_gds_errors_captured() -> None:
    """Verify a GPUDirect Storage registration failure surfaces as a
    RuntimeError. Skipped on Windows and on CUDA older than 12.6.

    NOTE(review): torch.version.cuda is parsed before the platform guard —
    on a CPU-only build it is None and this would crash; callers appear to
    guard on is_cuda_system. Confirm.
    """
    major_version = int(torch.version.cuda.split(".")[0])
    minor_version = int(torch.version.cuda.split(".")[1])
    if target_os == "windows":
        print(f"{target_os} is not supported for GDS smoke test")
        return
    if major_version < 12 or (major_version == 12 and minor_version < 6):
        print("CUDA version is not supported for GDS smoke test")
        return
    cuda_exception_missed = True
    try:
        print("Testing test_cuda_gds_errors_captured")
        with NamedTemporaryFile() as f:
            torch.cuda.gds.GdsFile(f.name, os.O_CREAT | os.O_RDWR)
    except RuntimeError as e:
        expected_error = "cuFileHandleRegister failed"
        if re.search(expected_error, f"{e}"):
            print(f"Caught CUDA exception with success: {e}")
            cuda_exception_missed = False
        else:
            raise e
    if cuda_exception_missed:
        raise RuntimeError(
            "Expected cuFileHandleRegister failed RuntimeError but have not received!"
        )
def find_pypi_package_version(package: str) -> Optional[str]:
    """Return the version of the first installed distribution whose name
    starts with *package*, or None if no such distribution is installed."""
    from importlib import metadata

    for dist in metadata.distributions():
        if dist.metadata["Name"].startswith(package):
            return dist.version
    return None
def cudnn_to_version_str(cudnn_version: int) -> str:
    """Convert the integer reported by torch.backends.cudnn.version(),
    encoded as major*10000 + minor*100 + patch, to "major.minor.patch".

    Uses pure integer arithmetic instead of the original float division,
    which could mis-round for very large version integers.
    """
    patch = cudnn_version % 10
    minor = cudnn_version // 100 % 100
    major = cudnn_version // 10000 % 10000
    return f"{major}.{minor}.{patch}"
def compare_pypi_to_torch_versions(
    package: str, pypi_version: str, torch_version: str
) -> None:
    """Check that the PyPI-installed *package* version matches the version
    torch was built against.

    Raises RuntimeError when the package was not found on PyPI or when the
    versions disagree; prints a confirmation on match.
    """
    if pypi_version is None:
        raise RuntimeError(f"Can't find {package} in PyPI for Torch: {torch_version}")
    if not pypi_version.startswith(torch_version):
        raise RuntimeError(
            f"Wrong {package} version. Torch: {torch_version} PyPI: {pypi_version}"
        )
    print(f"Found matching {package}. Torch: {torch_version} PyPI {pypi_version}")
def smoke_test_cuda(
    package: str,
    runtime_error_check: str,
    torch_compile_check: str,
    pypi_pkg_check: str,
) -> None:
    """Validate the CUDA stack: device availability, CUDA/cuDNN/NCCL
    versions, torch.compile, and (optionally) runtime-error capture.

    Relies on module-level globals: is_cuda_system, gpu_arch_ver, MODULES,
    target_os. The string flags are "enabled"/"disabled".
    """
    if not torch.cuda.is_available() and is_cuda_system:
        raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.")
    if package == "all" and is_cuda_system:
        for module in MODULES:
            imported_module = importlib.import_module(module["name"])
            # TBD for vision move extension module to private so it will
            # be _extention.
            version = "N/A"
            if module["extension"] == "extension":
                version = imported_module.extension._check_cuda_version()
            else:
                version = imported_module._extension._check_cuda_version()
            print(f"{module['name']} CUDA: {version}")
    # torch.compile is available on macos-arm64 and Linux for python 3.8-3.13
    if (
        torch_compile_check == "enabled"
        and sys.version_info < (3, 14, 0)
        and target_os in ["linux", "linux-aarch64", "macos-arm64", "darwin"]
    ):
        smoke_test_compile("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        if torch.version.cuda != gpu_arch_ver:
            raise RuntimeError(
                f"Wrong CUDA version. Loaded: {torch.version.cuda} Expected: {gpu_arch_ver}"
            )
        print(f"torch cuda: {torch.version.cuda}")
        torch.cuda.init()
        print("CUDA initialized successfully")
        print(f"Number of CUDA devices: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            print(f"Device {i}: {torch.cuda.get_device_name(i)}")
        print(f"cuDNN enabled? {torch.backends.cudnn.enabled}")
        torch_cudnn_version = cudnn_to_version_str(torch.backends.cudnn.version())
        print(f"Torch cuDNN version: {torch_cudnn_version}")
        if sys.platform in ["linux", "linux2"]:
            torch_nccl_version = ".".join(str(v) for v in torch.cuda.nccl.version())
            print(f"Torch nccl; version: {torch_nccl_version}")
            # Pypi dependencies are installed on linux only and nccl is available only on Linux.
            if pypi_pkg_check == "enabled" and sys.platform in ["linux", "linux2"]:
                compare_pypi_to_torch_versions(
                    "cudnn", find_pypi_package_version("nvidia-cudnn"), torch_cudnn_version
                )
                compare_pypi_to_torch_versions(
                    "nccl", find_pypi_package_version("nvidia-nccl"), torch_nccl_version
                )
    if runtime_error_check == "enabled":
        test_cuda_runtime_errors_captured()
def smoke_test_conv2d() -> None:
    """Run Conv2d forward passes on CPU and, when is_cuda_system, on CUDA
    (including autocast and fp16/fp32/fp64 dtypes)."""
    import torch.nn as nn

    print("Testing smoke_test_conv2d")
    # With square kernels and equal stride
    m = nn.Conv2d(16, 33, 3, stride=2)
    # non-square kernels and unequal stride and with padding
    m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
    assert m is not None
    # non-square kernels and unequal stride and with padding and dilation
    basic_conv = nn.Conv2d(
        16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
    )
    input = torch.randn(20, 16, 50, 100)
    output = basic_conv(input)
    if is_cuda_system:
        print("Testing smoke_test_conv2d with cuda")
        conv = nn.Conv2d(3, 3, 3).cuda()
        x = torch.randn(1, 3, 24, 24, device="cuda")
        with torch.cuda.amp.autocast():
            out = conv(x)
        assert out is not None
        supported_dtypes = [torch.float16, torch.float32, torch.float64]
        for dtype in supported_dtypes:
            print(f"Testing smoke_test_conv2d with cuda for {dtype}")
            conv = basic_conv.to(dtype).cuda()
            input = torch.randn(20, 16, 50, 100, device="cuda").type(dtype)
            output = conv(input)
            assert output is not None
def test_linalg(device="cpu") -> None:
print(f"Testing smoke_test_linalg on {device}")
A = torch.randn(5, 3, device=device)
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
assert (
U.shape == A.shape
and S.shape == torch.Size([3])
and Vh.shape == torch.Size([3, 3])
)
torch.dist(A, U @ torch.diag(S) @ Vh)
U, S, Vh = torch.linalg.svd(A)
assert (
U.shape == torch.Size([5, 5])
and S.shape == torch.Size([3])
and Vh.shape == torch.Size([3, 3])
)
torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh)
A = torch.randn(7, 5, 3, device=device)
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
torch.dist(A, U @ torch.diag_embed(S) @ Vh)
if device == "cuda":
supported_dtypes = [torch.float32, torch.float64]
for dtype in supported_dtypes:
print(f"Testing smoke_test_linalg with cuda for {dtype}")
A = torch.randn(20, 16, 50, 100, device=device, dtype=dtype)
torch.linalg.svd(A)
def test_sdpa(device="cpu", dtype=torch.float16) -> None:
"""Regression test for https://github.com/pytorch/pytorch/issues/167602
Without nvrtc_builtins on CuDNN-9.13 on CUDA-13 fails with ` No valid execution plans built.`
"""
print(f"Testing SDPA on {device} using type {dtype}")
k, q, v = torch.rand(3, 1, 16, 77, 64, dtype=dtype, device=device).unbind(0)
attn = torch.rand(1, 1, 77, 77, dtype=dtype, device=device)
rc = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn)
assert rc.isnan().any().item() is False
def smoke_test_compile(device: str = "cpu") -> None:
supported_dtypes = [torch.float16, torch.float32, torch.float64]
def foo(x: torch.Tensor) -> torch.Tensor:
return torch.sin(x) + torch.cos(x)
for dtype in supported_dtypes:
print(f"Testing smoke_test_compile for {device} and {dtype}")
x = torch.rand(3, 3, device=device).type(dtype)
x_eager = foo(x)
x_pt2 = torch.compile(foo)(x)
torch.testing.assert_close(x_eager, x_pt2)
# Check that SIMD were detected for the architecture
if device == "cpu":
from torch._inductor.codecache import pick_vec_isa
isa = pick_vec_isa()
if not isa:
raise RuntimeError("Can't detect vectorized ISA for CPU")
print(f"Picked CPU ISA {type(isa).__name__} bit width {isa.bit_width()}")
# Reset torch dynamo since we are changing mode
torch._dynamo.reset()
dtype = torch.float32
torch.set_float32_matmul_precision("high")
print(f"Testing smoke_test_compile with mode 'max-autotune' for {dtype}")
x = torch.rand(64, 1, 28, 28, device=device).type(torch.float32)
model = Net().to(device=device)
x_pt2 = torch.compile(model, mode="max-autotune")(x)
def smoke_test_nvshmem() -> None:
    """Check that torch was built with NVSHMEM (required from 2.9 onward)
    and report whether it is available at run time. Skipped on Windows or
    when CUDA is unavailable."""
    if not torch.cuda.is_available() or target_os == "windows":
        print("Windows platform or CUDA is not available, skipping NVSHMEM test")
        return
    # Check if NVSHMEM is compiled in current build
    try:
        from torch._C._distributed_c10d import _is_nvshmem_available
    except ImportError:
        # Not built with NVSHMEM support.
        # torch is not compiled with NVSHMEM prior to 2.9
        from torch.torch_version import TorchVersion

        if TorchVersion(torch.__version__) < (2, 9):
            return
        else:
            # After 2.9: NVSHMEM is expected to be compiled in current build
            raise RuntimeError("torch not compiled with NVSHMEM") from None
    print("torch compiled with NVSHMEM")
    # Check if NVSHMEM is available on current system.
    print(f"NVSHMEM available at run time: {_is_nvshmem_available()}")
def smoke_test_modules():
    """Clone (if needed) and run each companion module's own smoke-test
    script, failing with the subprocess output on error.

    Relies on module-level globals MODULES and target_os.
    """
    cwd = os.getcwd()
    for module in MODULES:
        if module["repo"]:
            if not os.path.exists(f"{cwd}/{module['repo_name']}"):
                print(f"Path does not exist: {cwd}/{module['repo_name']}")
                try:
                    subprocess.check_output(
                        f"git clone --depth 1 {module['repo']}",
                        stderr=subprocess.STDOUT,
                        shell=True,
                    )
                except subprocess.CalledProcessError as exc:
                    raise RuntimeError(
                        f"Cloning {module['repo']} FAIL: {exc.returncode} Output: {exc.output}"
                    ) from exc
            try:
                # Windows images only ship a `python` launcher, not `python3`.
                smoke_test_command = f"python3 {module['smoke_test']}"
                if target_os == "windows":
                    smoke_test_command = f"python {module['smoke_test']}"
                output = subprocess.check_output(
                    smoke_test_command,
                    stderr=subprocess.STDOUT,
                    shell=True,
                    universal_newlines=True,
                )
            except subprocess.CalledProcessError as exc:
                raise RuntimeError(
                    f"Module {module['name']} FAIL: {exc.returncode} Output: {exc.output}"
                ) from exc
            else:
                print(f"Output: \n{output}\n")
def parse_args():
    """Build and parse the smoke-test CLI flags; all checks default to enabled."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--package",
        help="Package to include in smoke testing",
        type=str,
        choices=["all", "torchonly"],
        default="all",
    )
    parser.add_argument(
        "--runtime-error-check",
        help="No Runtime Error check",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )
    parser.add_argument(
        "--torch-compile-check",
        help="Check torch compile",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )
    parser.add_argument(
        "--pypi-pkg-check",
        help="Check pypi package versions cudnn and nccl",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )
    return parser.parse_args()
def main() -> None:
    """Entry point: run every smoke test selected by the CLI options."""
    options = parse_args()
    print(f"torch: {torch.__version__}")
    print(torch.__config__.parallel_info())
    # All PyTorch binary builds should be built with OpenMP
    if not torch.backends.openmp.is_available():
        raise RuntimeError("PyTorch must be built with OpenMP support")
    check_version(options.package)
    smoke_test_conv2d()
    test_linalg()
    test_numpy()
    test_sdpa()
    if is_cuda_system:
        test_linalg("cuda")
        test_cuda_gds_errors_captured()
        test_sdpa("cuda")
    if options.package == "all":
        smoke_test_modules()
    smoke_test_cuda(
        options.package,
        options.runtime_error_check,
        options.torch_compile_check,
        options.pypi_pkg_check,
    )
    smoke_test_nvshmem()


if __name__ == "__main__":
    main()
| Net |
python | doocs__leetcode | lcci/05.06.Convert Integer/Solution.py | {
"start": 0,
"end": 152
} | class ____:
def convertInteger(self, A: int, B: int) -> int:
A &= 0xFFFFFFFF
B &= 0xFFFFFFFF
return (A ^ B).bit_count()
| Solution |
python | python-pillow__Pillow | src/PIL/ImageShow.py | {
"start": 8343,
"end": 9568
class ____(UnixViewer):
    """
    The X Viewer ``xv`` command.

    This viewer supports the ``title`` parameter.
    """

    def get_command_ex(
        self, file: str, title: str | None = None, **options: Any
    ) -> tuple[str, str]:
        # note: xv is pretty outdated. most modern systems have
        # imagemagick's display command instead.
        command = executable = "xv"
        if title:
            # Shell-quote the title: the command string is executed by a shell.
            command += f" -name {quote(title)}"
        return command, executable

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        # Build an argv list (no shell) and launch xv without waiting.
        args = ["xv"]
        title = options.get("title")
        if title:
            args += ["-name", title]
        args.append(path)
        subprocess.Popen(args)
        return 1
if sys.platform not in ("win32", "darwin"): # unixoids
if shutil.which("xdg-open"):
register(XDGViewer)
if shutil.which("display"):
register(DisplayViewer)
if shutil.which("gm"):
register(GmDisplayViewer)
if shutil.which("eog"):
register(EogViewer)
if shutil.which("xv"):
register(XVViewer)
| XVViewer |
python | getsentry__sentry | tests/sentry/incidents/serializers/test_workflow_engine_incident.py | {
"start": 783,
"end": 5125
class ____(TestWorkflowEngineSerializer):
    """Tests serializing GroupOpenPeriod rows through the workflow-engine
    incident serializers, including the fallback path used when the legacy
    incident/alert-rule lookup models have been deleted."""

    def setUp(self) -> None:
        super().setUp()
        self.add_warning_trigger()
        self.add_incident_data()
        self.incident_identifier = str(self.incident_group_open_period.incident_identifier)
        # Expected serializer payload for the base (non-expanded) case.
        self.incident_expected = {
            "id": str(self.incident_group_open_period.incident_id),
            "identifier": self.incident_identifier,
            "organizationId": str(self.group_open_period.project.organization_id),
            "projects": [self.project.slug],
            "alertRule": self.expected,
            "activities": None,
            "status": IncidentStatus.CRITICAL.value,
            "statusMethod": IncidentStatusMethod.RULE_TRIGGERED.value,
            "type": IncidentType.ALERT_TRIGGERED.value,
            "title": self.group.title,
            "dateStarted": self.group_open_period.date_started,
            "dateDetected": self.group_open_period.date_started,
            "dateCreated": self.group_open_period.date_added,
            "dateClosed": None,
        }

    def test_simple(self) -> None:
        serialized_incident = serialize(
            self.group_open_period, self.user, WorkflowEngineIncidentSerializer()
        )
        assert serialized_incident == self.incident_expected

    def test_detailed(self) -> None:
        # The detailed serializer additionally exposes the discover query.
        serialized_incident = serialize(
            self.group_open_period, self.user, WorkflowEngineDetailedIncidentSerializer()
        )
        self.incident_expected["discoverQuery"] = "(event.type:error) AND (level:error)"
        assert serialized_incident == self.incident_expected

    def test_no_incident(self) -> None:
        """
        Assert that nothing breaks if the legacy models do not exist.
        """
        self.incident_group_open_period.delete()
        ard = AlertRuleDetector.objects.filter(detector_id=self.detector.id)
        dcart = DataConditionAlertRuleTrigger.objects.filter(
            data_condition_id=self.critical_detector_trigger.id
        )
        aarta = ActionAlertRuleTriggerAction.objects.filter(action_id=self.critical_action.id)
        ard.delete()
        dcart.delete()
        aarta.delete()
        serialized_incident = serialize(
            self.group_open_period, self.user, WorkflowEngineIncidentSerializer()
        )
        # Without the legacy rows, ids fall back to deterministic fakes
        # derived from the workflow-engine object ids.
        fake_alert_rule_id = get_fake_id_from_object_id(self.detector.id)
        fake_incident_id = get_fake_id_from_object_id(self.group_open_period.id)
        self.expected.update({"id": str(fake_alert_rule_id)})
        self.expected["triggers"][0].update(
            {
                "id": str(get_fake_id_from_object_id(self.critical_detector_trigger.id)),
                "alertRuleId": str(fake_alert_rule_id),
            }
        )
        self.expected["triggers"][1].update(
            {
                "alertRuleId": str(fake_alert_rule_id),
            }
        )
        self.expected["triggers"][0]["actions"][0].update(
            {
                "id": str(get_fake_id_from_object_id(self.critical_action.id)),
                "alertRuleTriggerId": str(
                    get_fake_id_from_object_id(self.critical_detector_trigger.id)
                ),
            }
        )
        self.incident_expected.update(
            {
                "id": str(fake_incident_id),
                "identifier": str(fake_incident_id),
            }
        )
        assert serialized_incident == self.incident_expected

    def test_with_activities(self) -> None:
        gopa = GroupOpenPeriodActivity.objects.create(
            date_added=self.group_open_period.date_added,
            group_open_period=self.group_open_period,
            type=OpenPeriodActivityType.OPENED,
            value=self.group.priority,
        )
        serialized_incident = serialize(
            self.group_open_period,
            self.user,
            WorkflowEngineIncidentSerializer(expand=["activities"]),
        )
        assert len(serialized_incident["activities"]) == 1
        serialized_activity = serialized_incident["activities"][0]
        assert serialized_activity == {
            "id": str(gopa.id),
            "type": OpenPeriodActivityType.OPENED.to_str(),
            "value": PriorityLevel(self.group.priority).to_str(),
            "dateCreated": gopa.date_added,
        }
python | getsentry__sentry | tests/sentry_plugins/amazon_sqs/test_plugin.py | {
"start": 401,
"end": 5219
class ____(PluginTestCase):
    """Tests for the Amazon SQS data-forwarding plugin: message delivery,
    boto3 error handling, FIFO message-group ids, and the S3 large-payload
    path."""

    @cached_property
    def plugin(self) -> AmazonSQSPlugin:
        return AmazonSQSPlugin()

    def run_test(self) -> Event:
        # Configure the plugin, store a minimal error event, then invoke the
        # post-process hook that performs the SQS delivery.
        self.plugin.set_option("access_key", "access-key", self.project)
        self.plugin.set_option("secret_key", "secret-key", self.project)
        self.plugin.set_option("region", "us-east-1", self.project)
        self.plugin.set_option(
            "queue_url", "https://sqs.us-east-1.amazonaws.com/12345678/myqueue", self.project
        )
        event = self.store_event(
            data={
                "sentry.interfaces.Exception": {"type": "ValueError", "value": "foo bar"},
                "sentry.interfaces.User": {"id": "1", "email": "foo@example.com"},
                "type": "error",
                "metadata": {"type": "ValueError", "value": "foo bar"},
            },
            project_id=self.project.id,
        )
        with self.options({"system.url-prefix": "http://example.com"}):
            self.plugin.post_process(event=event)
        return event

    @patch("boto3.client")
    def test_simple_notification(self, mock_client: MagicMock) -> None:
        event = self.run_test()
        mock_client.assert_called_once_with(
            service_name="sqs",
            region_name="us-east-1",
            aws_access_key_id="access-key",
            aws_secret_access_key="secret-key",
        )
        mock_client.return_value.send_message.assert_called_once_with(
            QueueUrl="https://sqs.us-east-1.amazonaws.com/12345678/myqueue",
            MessageBody=orjson.dumps(
                self.plugin.get_event_payload(event), option=orjson.OPT_UTC_Z
            ).decode(),
        )

    @patch("boto3.client")
    def test_token_error(self, mock_client: MagicMock) -> None:
        # Unknown client errors propagate; AccessDenied is swallowed.
        mock_client.return_value.send_message.side_effect = ClientError(
            {"Error": {"Code": "Hello", "Message": "hello"}}, "SendMessage"
        )
        with pytest.raises(ClientError):
            self.run_test()
        mock_client.return_value.send_message.side_effect = ClientError(
            {"Error": {"Code": "AccessDenied", "Message": "Hello"}}, "SendMessage"
        )
        self.run_test()

    @patch("boto3.client")
    def test_message_group_error(self, mock_client: MagicMock) -> None:
        # A missing MessageGroupId (FIFO queue misconfiguration) is tolerated.
        mock_client.return_value.send_message.side_effect = ClientError(
            {
                "Error": {
                    "Code": "MissingParameter",
                    "Message": "The request must contain the parameter MessageGroupId.",
                }
            },
            "SendMessage",
        )
        self.run_test()

    @patch("uuid.uuid4")
    @patch("boto3.client")
    def test_pass_message_group_id(self, mock_client: MagicMock, mock_uuid: MagicMock) -> None:
        mock_uuid.return_value = self.get_mock_uuid()
        self.plugin.set_option("message_group_id", "my_group", self.project)
        event = self.run_test()
        mock_client.return_value.send_message.assert_called_once_with(
            QueueUrl="https://sqs.us-east-1.amazonaws.com/12345678/myqueue",
            MessageBody=orjson.dumps(
                self.plugin.get_event_payload(event), option=orjson.OPT_UTC_Z
            ).decode(),
            MessageGroupId="my_group",
            MessageDeduplicationId="abc123",
        )

    @patch("boto3.client")
    def test_use_s3_bucket(self, mock_client: MagicMock) -> None:
        # With an S3 bucket configured, the payload goes to S3 and only a
        # pointer message is sent to SQS.
        self.plugin.set_option("s3_bucket", "my_bucket", self.project)
        event = self.run_test()
        date = event.datetime.strftime("%Y-%m-%d")
        key = f"{event.project.slug}/{date}/{event.event_id}"
        mock_client.return_value.send_message.assert_called_once_with(
            QueueUrl="https://sqs.us-east-1.amazonaws.com/12345678/myqueue",
            MessageBody=orjson.dumps(
                {
                    "s3Url": f"https://my_bucket.s3-us-east-1.amazonaws.com/{key}",
                    "eventID": event.event_id,
                },
                option=orjson.OPT_UTC_Z,
            ).decode(),
        )
        mock_client.return_value.put_object.assert_called_once_with(
            Bucket="my_bucket",
            Body=orjson.dumps(
                self.plugin.get_event_payload(event), option=orjson.OPT_UTC_Z
            ).decode(),
            Key=key,
        )

    # NOTE(review): the signature takes a second `logger` mock but only one
    # @patch decorator is applied — presumably stale; the test is skipped
    # upstream, so confirm before un-skipping.
    @patch("boto3.client")
    @pytest.mark.skip(reason="https://github.com/getsentry/sentry/issues/44858")
    def test_invalid_s3_bucket(self, mock_client: MagicMock, logger: MagicMock) -> None:
        self.plugin.set_option("s3_bucket", "bad_bucket", self.project)
        mock_client.return_value.put_object.side_effect = ClientError(
            {"Error": {"Code": "NoSuchBucket"}},
            "PutObject",
        )
        self.run_test()
python | coleifer__peewee | peewee.py | {
"start": 13731,
"end": 15013
class ____(object):
    """Hands out short table aliases (``t1``, ``t2``, ...) scoped by depth.

    A stack of dicts maps sources to aliases; ``push``/``pop`` enter and
    leave nested scopes (e.g. subqueries) while the global counter keeps
    aliases unique across the whole statement.
    """
    __slots__ = ('_counter', '_current_index', '_mapping')

    def __init__(self):
        # One dict per nesting depth; depth 1 is created immediately.
        self._counter = 0
        self._current_index = 0
        self._mapping = []
        self.push()

    @property
    def mapping(self):
        """The alias dict for the innermost (current) depth."""
        return self._mapping[self._current_index - 1]

    def add(self, source):
        """Ensure *source* has an alias at the current depth and return it."""
        current = self.mapping
        if source not in current:
            self._counter += 1
            self[source] = 't%d' % self._counter
        return current[source]

    def get(self, source, any_depth=False):
        """Look up an alias; with *any_depth*, search enclosing scopes first,
        innermost outward, before allocating a new alias."""
        if any_depth:
            depth = self._current_index - 1
            while depth >= 0:
                scope = self._mapping[depth]
                if source in scope:
                    return scope[source]
                depth -= 1
        return self.add(source)

    def __getitem__(self, source):
        return self.get(source)

    def __setitem__(self, source, alias):
        self.mapping[source] = alias

    def push(self):
        """Enter a nested scope, reusing a previously-allocated dict when
        the stack has been popped back and pushed again."""
        self._current_index += 1
        if self._current_index > len(self._mapping):
            self._mapping.append({})

    def pop(self):
        """Leave the innermost scope; the root scope cannot be popped."""
        if self._current_index == 1:
            raise ValueError('Cannot pop() from empty alias manager.')
        self._current_index -= 1
| AliasManager |
python | qdrant__qdrant-client | qdrant_client/embed/builtin_embedder.py | {
"start": 127,
"end": 3190
class ____:
    """Fallback embedder used when neither FastEmbed nor cloud inference is
    available.

    Only the sparse model ``Qdrant/Bm25`` is recognized; texts are wrapped
    into ``models.Document`` instances so the server performs the actual
    inference. All other model-capability checks report unsupported.
    """

    _SUPPORTED_MODELS = ("Qdrant/Bm25",)

    def __init__(self, **kwargs: Any) -> None:
        pass

    def embed(
        self,
        model_name: str,
        texts: Optional[list[str]] = None,
        options: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> NumericVector:
        """Wrap *texts* as documents for server-side inference.

        Raises ValueError when no texts are given, when images are passed
        (only possible with cloud inference), or when *model_name* is not a
        supported sparse model.
        """
        if texts is None:
            if "images" in kwargs:
                raise ValueError(
                    "Image processing is only available with cloud inference of FastEmbed"
                )
            raise ValueError("Texts must be provided for the inference")
        if not self.is_supported_sparse_model(model_name):
            raise ValueError(
                f"Model {model_name} is not supported in {self.__class__.__name__}. "
                f"Did you forget to enable cloud inference or install FastEmbed for local inference?"
            )
        documents = []
        for item in texts:
            documents.append(models.Document(text=item, options=options, model=model_name))
        return documents

    @classmethod
    def is_supported_text_model(cls, model_name: str) -> bool:
        """Dense text models are never handled by the builtin embedder."""
        return False  # only the sparse Qdrant/Bm25 model is handled here

    @classmethod
    def is_supported_image_model(cls, model_name: str) -> bool:
        """Image models are never handled by the builtin embedder."""
        return False  # only the sparse Qdrant/Bm25 model is handled here

    @classmethod
    def is_supported_late_interaction_text_model(cls, model_name: str) -> bool:
        """Late-interaction text models are never handled by the builtin embedder."""
        return False  # only the sparse Qdrant/Bm25 model is handled here

    @classmethod
    def is_supported_late_interaction_multimodal_model(cls, model_name: str) -> bool:
        """Late-interaction multimodal models are never handled by the builtin embedder."""
        return False  # only the sparse Qdrant/Bm25 model is handled here

    @classmethod
    def is_supported_sparse_model(cls, model_name: str) -> bool:
        """Return True when *model_name* matches a supported sparse model
        (case-insensitive)."""
        supported = {name.lower() for name in cls._SUPPORTED_MODELS}
        return model_name.lower() in supported
| BuiltinEmbedder |
python | getsentry__sentry | src/sentry/db/postgres/schema.py | {
"start": 2238,
"end": 2885
class ____(PostgresDatabaseSchemaEditor):
    """workaround for https://code.djangoproject.com/ticket/36374"""

    def _ensure_btree_gist(self) -> None:
        # Exclusion constraints require the btree_gist extension to exist
        # before the constraint DDL runs.
        self.execute("CREATE EXTENSION IF NOT EXISTS btree_gist;")

    def create_model(self, model: type[Model]) -> None:
        needs_extension = any(
            isinstance(c, ExclusionConstraint) for c in model._meta.constraints
        )
        if needs_extension:
            self._ensure_btree_gist()
        super().create_model(model)

    def add_constraint(self, model: type[Model], constraint: BaseConstraint) -> None:
        if isinstance(constraint, ExclusionConstraint):
            self._ensure_btree_gist()
        super().add_constraint(model, constraint)
| MakeBtreeGistSchemaEditor |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 47188,
"end": 56369
class ____(_ConfigUpdateModel):
    """Partial update of a collection configuration.

    Fields use snake_case aliases for the public API while the attribute
    names match the camelCase keys of the server-side schema; only fields
    that are not ``None`` are merged into the existing schema.
    """

    description: Optional[str] = Field(default=None)
    property_descriptions: Optional[Dict[str, str]] = Field(default=None)
    invertedIndexConfig: Optional[_InvertedIndexConfigUpdate] = Field(
        default=None, alias="inverted_index_config"
    )
    replicationConfig: Optional[_ReplicationConfigUpdate] = Field(
        default=None, alias="replication_config"
    )
    vectorIndexConfig: Optional[_VectorIndexConfigUpdate] = Field(
        default=None, alias="vector_index_config"
    )
    vectorizerConfig: Optional[Union[_VectorIndexConfigUpdate, List[_NamedVectorConfigUpdate]]] = (
        Field(default=None, alias="vectorizer_config")
    )
    vectorConfig: Optional[Union[_VectorConfigUpdate, List[_VectorConfigUpdate]]] = Field(
        default=None, alias="vector_config"
    )
    multiTenancyConfig: Optional[_MultiTenancyConfigUpdate] = Field(
        default=None, alias="multi_tenancy_config"
    )
    generativeConfig: Optional[_GenerativeProvider] = Field(default=None, alias="generative_config")
    rerankerConfig: Optional[_RerankerProvider] = Field(default=None, alias="reranker_config")

    @field_validator("vectorConfig", mode="before")
    def mutual_exclusivity(
        cls,
        v: Optional[Union[_VectorConfigUpdate, List[_VectorConfigUpdate]]],
        info: ValidationInfo,
    ):
        # vectorConfig supersedes the legacy vectorizerConfig/vectorIndexConfig
        # fields; specifying it together with either of them is rejected.
        if v is None:
            return v
        if info.data["vectorizerConfig"] is not None:
            raise ValueError(
                "Cannot specify vectorizerConfig when also specifying vectorConfig. Please use one or the other."
            )
        if info.data["vectorIndexConfig"] is not None:
            raise ValueError(
                "Cannot specify vectorIndexConfig when also specifying vectorConfig. Please use one or the other."
            )
        return v

    def __check_quantizers(
        self,
        quantizer: Optional[_QuantizerConfigUpdate],
        vector_index_config: dict,
    ) -> None:
        # The quantizer type cannot be switched in place: if the update asks
        # for one quantizer while any *other* quantizer is already enabled in
        # the existing index config, reject the update.
        if (
            (
                isinstance(quantizer, _PQConfigUpdate)
                and (
                    vector_index_config.get("bq", {"enabled": False})["enabled"]
                    or vector_index_config.get("sq", {"enabled": False})["enabled"]
                    or vector_index_config.get("rq", {"enabled": False})["enabled"]
                )
            )
            or (
                isinstance(quantizer, _BQConfigUpdate)
                and (
                    vector_index_config["pq"]["enabled"]
                    or vector_index_config.get("sq", {"enabled": False})["enabled"]
                    or vector_index_config.get("rq", {"enabled": False})["enabled"]
                )
            )
            or (
                isinstance(quantizer, _SQConfigUpdate)
                and (
                    vector_index_config["pq"]["enabled"]
                    or vector_index_config.get("bq", {"enabled": False})["enabled"]
                    or vector_index_config.get("rq", {"enabled": False})["enabled"]
                )
            )
            or (
                isinstance(quantizer, _RQConfigUpdate)
                and (
                    vector_index_config["pq"]["enabled"]
                    or vector_index_config.get("bq", {"enabled": False})["enabled"]
                    or vector_index_config.get("sq", {"enabled": False})["enabled"]
                )
            )
        ):
            raise WeaviateInvalidInputError(
                f"Cannot update vector index config {vector_index_config} to change its quantizer. To do this, you must recreate the collection."
            )
        return None

    def merge_with_existing(self, schema: Dict[str, Any]) -> Dict[str, Any]:
        """Merge every non-``None`` field of this update into *schema*.

        Mutates and returns *schema*. Raises WeaviateInvalidInputError for
        unknown properties/named vectors or for disallowed quantizer changes.
        """
        if self.description is not None:
            schema["description"] = self.description
        if self.property_descriptions is not None:
            if (p := schema["properties"]) is None:
                raise WeaviateInvalidInputError(
                    "Cannot update property descriptions without existing properties in the schema"
                )
            props = {prop["name"]: prop for prop in p}
            for prop_name, prop_desc in self.property_descriptions.items():
                if prop_name not in props:
                    raise WeaviateInvalidInputError(
                        f"Property {prop_name} does not exist in the existing properties"
                    )
                # props holds references into schema, so this edits in place.
                props[prop_name]["description"] = prop_desc
        if self.invertedIndexConfig is not None:
            schema["invertedIndexConfig"] = self.invertedIndexConfig.merge_with_existing(
                schema["invertedIndexConfig"]
            )
        if self.replicationConfig is not None:
            schema["replicationConfig"] = self.replicationConfig.merge_with_existing(
                schema["replicationConfig"]
            )
        if self.multiTenancyConfig is not None:
            schema["multiTenancyConfig"] = self.multiTenancyConfig.merge_with_existing(
                schema["multiTenancyConfig"]
            )
        if self.vectorIndexConfig is not None:
            self.__check_quantizers(self.vectorIndexConfig.quantizer, schema["vectorIndexConfig"])
            schema["vectorIndexConfig"] = self.vectorIndexConfig.merge_with_existing(
                schema["vectorIndexConfig"]
            )
        if self.generativeConfig is not None:
            # clear any existing generative config
            if "moduleConfig" in schema:
                schema["moduleConfig"] = {
                    k: v for k, v in schema["moduleConfig"].items() if "generative" not in k
                }
            self.__add_to_module_config(
                schema,
                self.generativeConfig.generative.value,
                self.generativeConfig._to_dict(),
            )
        if self.rerankerConfig is not None:
            # clear any existing reranker config
            if "moduleConfig" in schema:
                schema["moduleConfig"] = {
                    k: v for k, v in schema["moduleConfig"].items() if "reranker" not in k
                }
            self.__add_to_module_config(
                schema,
                self.rerankerConfig.reranker.value,
                self.rerankerConfig._to_dict(),
            )
        if self.vectorizerConfig is not None:
            # Legacy path: a single unnamed index config, or a list of named
            # vector updates keyed by name into schema["vectorConfig"].
            if isinstance(self.vectorizerConfig, _VectorIndexConfigUpdate):
                self.__check_quantizers(
                    self.vectorizerConfig.quantizer, schema["vectorIndexConfig"]
                )
                schema["vectorIndexConfig"] = self.vectorizerConfig.merge_with_existing(
                    schema["vectorIndexConfig"]
                )
            else:
                for vc in self.vectorizerConfig:
                    if vc.name not in schema["vectorConfig"]:
                        raise WeaviateInvalidInputError(
                            f"Vector config with name {vc.name} does not exist in the existing vector config"
                        )
                    self.__check_quantizers(
                        vc.vectorIndexConfig.quantizer,
                        schema["vectorConfig"][vc.name]["vectorIndexConfig"],
                    )
                    schema["vectorConfig"][vc.name]["vectorIndexConfig"] = (
                        vc.vectorIndexConfig.merge_with_existing(
                            schema["vectorConfig"][vc.name]["vectorIndexConfig"]
                        )
                    )
                    schema["vectorConfig"][vc.name]["vectorIndexType"] = (
                        vc.vectorIndexConfig.vector_index_type()
                    )
        if self.vectorConfig is not None:
            # Normalize a single update to a list, then apply per named vector.
            vcs = (
                [self.vectorConfig]
                if isinstance(self.vectorConfig, _VectorConfigUpdate)
                else self.vectorConfig
            )
            for vc in vcs:
                if vc.name not in schema["vectorConfig"]:
                    raise WeaviateInvalidInputError(
                        f"Vector config with name {vc.name} does not exist in the existing vector config"
                    )
                self.__check_quantizers(
                    vc.vectorIndexConfig.quantizer,
                    schema["vectorConfig"][vc.name]["vectorIndexConfig"],
                )
                schema["vectorConfig"][vc.name]["vectorIndexConfig"] = (
                    vc.vectorIndexConfig.merge_with_existing(
                        schema["vectorConfig"][vc.name]["vectorIndexConfig"]
                    )
                )
                schema["vectorConfig"][vc.name]["vectorIndexType"] = (
                    vc.vectorIndexConfig.vector_index_type()
                )
        return schema

    @staticmethod
    def __add_to_module_config(
        return_dict: Dict[str, Any], addition_key: str, addition_val: Dict[str, Any]
    ) -> None:
        # Insert/overwrite one entry under "moduleConfig", creating the
        # container dict on first use.
        if "moduleConfig" not in return_dict:
            return_dict["moduleConfig"] = {addition_key: addition_val}
        else:
            return_dict["moduleConfig"][addition_key] = addition_val
@dataclass
| _CollectionConfigUpdate |
python | falconry__falcon | examples/recipes/multipart_mixed_main.py | {
"start": 36,
"end": 595
class ____:
    """Falcon resource that flattens nested multipart/mixed form parts into
    a ``{filename: text}`` mapping echoed back as the response media."""

    def on_post(self, req, resp):
        collected = {}
        for outer in req.media:
            # Only descend into parts that are themselves multipart/mixed.
            if not outer.content_type.startswith('multipart/mixed'):
                continue
            for inner in outer.media:
                collected[inner.filename] = inner.text
        resp.media = collected
# Handler for multipart/form-data whose parts may themselves be
# multipart/mixed: a second MultipartFormHandler is registered for the
# nested 'multipart/mixed' media type so inner parts are parsed too.
parser = falcon.media.MultipartFormHandler()
parser.parse_options.media_handlers['multipart/mixed'] = (
    falcon.media.MultipartFormHandler()
)

app = falcon.App()
app.req_options.media_handlers[falcon.MEDIA_MULTIPART] = parser
app.add_route('/forms', Forms())
| Forms |
python | doocs__leetcode | solution/1600-1699/1650.Lowest Common Ancestor of a Binary Tree III/Solution.py | {
"start": 177,
"end": 474
class ____:
    def lowestCommonAncestor(self, p: "Node", q: "Node") -> "Node":
        """Return the lowest common ancestor of *p* and *q* via parent links.

        Walk from *p* to the root recording every node seen, then climb
        from *q* until reaching a recorded node — that node is the LCA.
        O(h) time and space for tree height h.
        """
        seen = set()
        cur = p
        while cur:
            seen.add(cur)
            cur = cur.parent
        cur = q
        while cur not in seen:
            cur = cur.parent
        return cur
| Solution |
python | realpython__materials | duck-typing-python/queues.py | {
"start": 32,
"end": 506
class ____:
    """FIFO queue backed by ``collections.deque`` that also supports the
    iteration, length, reversal, and membership protocols."""

    def __init__(self):
        self._elements = deque()

    def enqueue(self, element):
        """Add *element* at the back of the queue."""
        self._elements.append(element)

    def dequeue(self):
        """Remove and return the front element (raises IndexError if empty)."""
        return self._elements.popleft()

    def __iter__(self):
        yield from self._elements

    def __len__(self):
        return len(self._elements)

    def __reversed__(self):
        return reversed(self._elements)

    def __contains__(self, element):
        return element in self._elements
| Queue |
python | plotly__plotly.py | plotly/graph_objs/surface/contours/x/_project.py | {
"start": 233,
"end": 5178
class ____(_BaseTraceHierarchyType):
    """Code-generated plotly hierarchy node controlling whether x-axis
    contour lines of a 3D surface are projected onto the x/y/z planes."""

    # Location of this node within the figure property hierarchy.
    _parent_path_str = "surface.contours.x"
    _path_str = "surface.contours.x.project"
    _valid_props = {"x", "y", "z"}

    @property
    def x(self):
        """
        Determines whether or not these contour lines are projected on
        the x plane. If `highlight` is set to True (the default), the
        projected lines are shown on hover. If `show` is set to True,
        the projected lines are shown in permanence.

        The 'x' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    @property
    def y(self):
        """
        Determines whether or not these contour lines are projected on
        the y plane. If `highlight` is set to True (the default), the
        projected lines are shown on hover. If `show` is set to True,
        the projected lines are shown in permanence.

        The 'y' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val

    @property
    def z(self):
        """
        Determines whether or not these contour lines are projected on
        the z plane. If `highlight` is set to True (the default), the
        projected lines are shown on hover. If `show` is set to True,
        the projected lines are shown in permanence.

        The 'z' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["z"]

    @z.setter
    def z(self, val):
        self["z"] = val

    @property
    def _prop_descriptions(self):
        return """\
        x
            Determines whether or not these contour lines are
            projected on the x plane. If `highlight` is set to True
            (the default), the projected lines are shown on hover.
            If `show` is set to True, the projected lines are shown
            in permanence.
        y
            Determines whether or not these contour lines are
            projected on the y plane. If `highlight` is set to True
            (the default), the projected lines are shown on hover.
            If `show` is set to True, the projected lines are shown
            in permanence.
        z
            Determines whether or not these contour lines are
            projected on the z plane. If `highlight` is set to True
            (the default), the projected lines are shown on hover.
            If `show` is set to True, the projected lines are shown
            in permanence.
        """

    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Project object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.surface.contours.x.Project`
        x
            Determines whether or not these contour lines are
            projected on the x plane. If `highlight` is set to True
            (the default), the projected lines are shown on hover.
            If `show` is set to True, the projected lines are shown
            in permanence.
        y
            Determines whether or not these contour lines are
            projected on the y plane. If `highlight` is set to True
            (the default), the projected lines are shown on hover.
            If `show` is set to True, the projected lines are shown
            in permanence.
        z
            Determines whether or not these contour lines are
            projected on the z plane. If `highlight` is set to True
            (the default), the projected lines are shown on hover.
            If `show` is set to True, the projected lines are shown
            in permanence.

        Returns
        -------
        Project
        """
        super().__init__("project")
        # Internal construction path: parent supplied, skip normal init.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Accept a dict, a Project instance, or nothing as the first arg.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.surface.contours.x.Project
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.contours.x.Project`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword args take precedence over values in `arg`.
        self._set_property("x", arg, x)
        self._set_property("y", arg, y)
        self._set_property("z", arg, z)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
python | pallets__flask | src/flask/testing.py | {
"start": 3374,
"end": 8823
class ____(Client):
    """Works like a regular Werkzeug test client, with additional behavior for
    Flask. Can defer the cleanup of the request context until the end of a
    ``with`` block. For general information about how to use this class refer to
    :class:`werkzeug.test.Client`.

    .. versionchanged:: 0.12
        `app.test_client()` includes preset default environment, which can be
        set after instantiation of the `app.test_client()` object in
        `client.environ_base`.

    Basic usage is outlined in the :doc:`/testing` chapter.
    """

    application: Flask

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        # True while inside a ``with client:`` block; request contexts pushed
        # during a request are then kept alive until the block exits.
        self.preserve_context = False
        self._new_contexts: list[t.ContextManager[t.Any]] = []
        self._context_stack = ExitStack()
        self.environ_base = {
            "REMOTE_ADDR": "127.0.0.1",
            "HTTP_USER_AGENT": f"Werkzeug/{_get_werkzeug_version()}",
        }

    @contextmanager
    def session_transaction(
        self, *args: t.Any, **kwargs: t.Any
    ) -> t.Iterator[SessionMixin]:
        """When used in combination with a ``with`` statement this opens a
        session transaction. This can be used to modify the session that
        the test client uses. Once the ``with`` block is left the session is
        stored back.

        ::

            with client.session_transaction() as session:
                session['value'] = 42

        Internally this is implemented by going through a temporary test
        request context and since session handling could depend on
        request variables this function accepts the same arguments as
        :meth:`~flask.Flask.test_request_context` which are directly
        passed through.
        """
        if self._cookies is None:
            raise TypeError(
                "Cookies are disabled. Create a client with 'use_cookies=True'."
            )

        app = self.application
        ctx = app.test_request_context(*args, **kwargs)
        self._add_cookies_to_wsgi(ctx.request.environ)

        with ctx:
            sess = app.session_interface.open_session(app, ctx.request)

        if sess is None:
            raise RuntimeError("Session backend did not open a session.")

        yield sess
        resp = app.response_class()

        if app.session_interface.is_null_session(sess):
            return

        # Save the (possibly modified) session and fold the resulting
        # Set-Cookie headers back into the client's cookie jar.
        with ctx:
            app.session_interface.save_session(app, sess, resp)

        self._update_cookies_from_response(
            ctx.request.host.partition(":")[0],
            ctx.request.path,
            resp.headers.getlist("Set-Cookie"),
        )

    def _copy_environ(self, other: WSGIEnvironment) -> WSGIEnvironment:
        """Merge ``environ_base`` defaults under *other*, installing the
        context-preservation hook when inside a ``with`` block."""
        out = {**self.environ_base, **other}

        if self.preserve_context:
            # The app will call this with each context it would pop, letting
            # the client collect and keep them alive.
            out["werkzeug.debug.preserve_context"] = self._new_contexts.append

        return out

    def _request_from_builder_args(
        self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
    ) -> BaseRequest:
        """Build a request object from ``EnvironBuilder``-style arguments."""
        kwargs["environ_base"] = self._copy_environ(kwargs.get("environ_base", {}))
        builder = EnvironBuilder(self.application, *args, **kwargs)

        try:
            return builder.get_request()
        finally:
            builder.close()

    def open(
        self,
        *args: t.Any,
        buffered: bool = False,
        follow_redirects: bool = False,
        **kwargs: t.Any,
    ) -> TestResponse:
        """Issue a request. The first positional argument may be an
        ``EnvironBuilder``, a WSGI environ dict, or a request object;
        otherwise the arguments are passed to ``EnvironBuilder``."""
        if args and isinstance(
            args[0], (werkzeug.test.EnvironBuilder, dict, BaseRequest)
        ):
            if isinstance(args[0], werkzeug.test.EnvironBuilder):
                builder = copy(args[0])
                builder.environ_base = self._copy_environ(builder.environ_base or {})  # type: ignore[arg-type]
                request = builder.get_request()
            elif isinstance(args[0], dict):
                request = EnvironBuilder.from_environ(
                    args[0], app=self.application, environ_base=self._copy_environ({})
                ).get_request()
            else:
                # isinstance(args[0], BaseRequest)
                request = copy(args[0])
                request.environ = self._copy_environ(request.environ)
        else:
            # request is None
            request = self._request_from_builder_args(args, kwargs)

        # Pop any previously preserved contexts. This prevents contexts
        # from being preserved across redirects or multiple requests
        # within a single block.
        self._context_stack.close()

        response = super().open(
            request,
            buffered=buffered,
            follow_redirects=follow_redirects,
        )
        response.json_module = self.application.json  # type: ignore[assignment]

        # Re-push contexts that were preserved during the request.
        for cm in self._new_contexts:
            self._context_stack.enter_context(cm)

        self._new_contexts.clear()
        return response

    def __enter__(self) -> FlaskClient:
        # Entering the client as a context manager turns on context
        # preservation; nesting is not supported.
        if self.preserve_context:
            raise RuntimeError("Cannot nest client invocations")

        self.preserve_context = True
        return self

    def __exit__(
        self,
        exc_type: type | None,
        exc_value: BaseException | None,
        tb: TracebackType | None,
    ) -> None:
        # Leaving the block releases every preserved request context.
        self.preserve_context = False
        self._context_stack.close()
| FlaskClient |
python | sqlalchemy__sqlalchemy | examples/association/dict_of_sets_with_default.py | {
"start": 1205,
"end": 1290
class ____(DeclarativeBase):
    # Shared declarative base: every mapped subclass inherits a surrogate
    # integer primary key column named "id".
    id: Mapped[int] = mapped_column(primary_key=True)
| Base |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/self_assigning_variable.py | {
"start": 770,
"end": 809
class ____:
    # Lint-rule fixture: these class-scope self-assignments (reading the
    # like-named outer bindings) are intentionally flagged by the
    # self-assigning-variable rule — do not "fix" them.
    foo = foo
    bar = bar
| Foo |
python | coleifer__peewee | tests/models.py | {
"start": 63507,
"end": 65585
class ____(ModelTestCase):
    """Exercises the ``only_save_dirty`` model-meta flag: when enabled,
    ``save()`` writes only changed fields and is a no-op with no changes."""

    database = get_in_memory_db()
    requires = [AutoCounter]

    def tearDown(self):
        super(TestDefaultDirtyBehavior, self).tearDown()
        # The flag is class-level shared state; always reset it so other
        # test cases see the default.
        AutoCounter._meta.only_save_dirty = False

    def test_default_dirty(self):
        AutoCounter._meta.only_save_dirty = True

        ac = AutoCounter()
        ac.save()
        self.assertEqual(ac.counter, 1)
        self.assertEqual(ac.control, 1)

        ac_db = AutoCounter.get((AutoCounter.counter == 1) &
                                (AutoCounter.control == 1))
        self.assertEqual(ac_db.counter, 1)
        self.assertEqual(ac_db.control, 1)

        # No changes.
        self.assertFalse(ac_db.save())

        ac = AutoCounter.create()
        self.assertEqual(ac.counter, 2)
        self.assertEqual(ac.control, 1)

        # With the flag off, a full save bumps the counter default again.
        AutoCounter._meta.only_save_dirty = False
        ac = AutoCounter()
        self.assertEqual(ac.counter, 3)
        self.assertEqual(ac.control, 1)
        ac.save()

        ac_db = AutoCounter.get(AutoCounter.id == ac.id)
        self.assertEqual(ac_db.counter, 3)

    @requires_models(Person)
    def test_save_only_dirty(self):
        # Regardless of the flag, save(only=[...]) persists just the listed
        # fields and leaves the others marked dirty.
        today = datetime.date.today()
        try:
            for only_save_dirty in (False, True):
                Person._meta.only_save_dirty = only_save_dirty

                p = Person.create(first='f', last='l', dob=today)
                p.first = 'f2'
                p.last = 'l2'
                p.save(only=[Person.first])
                self.assertEqual(p.dirty_fields, [Person.last])

                p_db = Person.get(Person.id == p.id)
                self.assertEqual((p_db.first, p_db.last), ('f2', 'l'))

                p.save()
                self.assertEqual(p.dirty_fields, [])
                p_db = Person.get(Person.id == p.id)
                self.assertEqual((p_db.first, p_db.last), ('f2', 'l2'))
                p.delete_instance()
        finally:
            # Reset only_save_dirty property for other tests.
            Person._meta.only_save_dirty = False
| TestDefaultDirtyBehavior |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_test.py | {
"start": 26054,
"end": 36092
class ____(unittest.TestCase):
    """Tests for the properties of descriptors in generated code."""

    def CheckMessageDescriptor(self, message_descriptor):
        """Assert basic identity, container, and extension-range properties
        of a TestAllTypes message descriptor."""
        # Basic properties
        self.assertEqual(message_descriptor.name, 'TestAllTypes')
        self.assertEqual(message_descriptor.full_name,
                         'proto2_unittest.TestAllTypes')
        # Test equality and hashability
        self.assertEqual(message_descriptor, message_descriptor)
        self.assertEqual(message_descriptor.fields[0].containing_type,
                         message_descriptor)
        self.assertIn(message_descriptor, [message_descriptor])
        self.assertIn(message_descriptor, {message_descriptor: None})
        # Test field containers
        self.CheckDescriptorSequence(message_descriptor.fields)
        self.CheckDescriptorMapping(message_descriptor.fields_by_name)
        self.CheckDescriptorMapping(message_descriptor.fields_by_number)
        self.CheckDescriptorMapping(message_descriptor.fields_by_camelcase_name)
        self.CheckDescriptorMapping(message_descriptor.enum_types_by_name)
        self.CheckDescriptorMapping(message_descriptor.enum_values_by_name)
        self.CheckDescriptorMapping(message_descriptor.oneofs_by_name)
        self.CheckDescriptorMapping(message_descriptor.enum_types[0].values_by_name)
        # Test extension range
        self.assertEqual(message_descriptor.extension_ranges, [])

    def CheckFieldDescriptor(self, field_descriptor):
        """Assert the expected properties of the optional_int32 field."""
        # Basic properties
        self.assertEqual(field_descriptor.name, 'optional_int32')
        self.assertEqual(field_descriptor.camelcase_name, 'optionalInt32')
        self.assertEqual(field_descriptor.full_name,
                         'proto2_unittest.TestAllTypes.optional_int32')
        self.assertEqual(field_descriptor.containing_type.name, 'TestAllTypes')
        self.assertEqual(field_descriptor.file, unittest_pb2.DESCRIPTOR)
        # Test equality and hashability
        self.assertEqual(field_descriptor, field_descriptor)
        self.assertEqual(
            field_descriptor.containing_type.fields_by_name['optional_int32'],
            field_descriptor)
        self.assertEqual(
            field_descriptor.containing_type.fields_by_camelcase_name[
                'optionalInt32'],
            field_descriptor)
        self.assertIn(field_descriptor, [field_descriptor])
        self.assertIn(field_descriptor, {field_descriptor: None})
        self.assertEqual(None, field_descriptor.extension_scope)
        self.assertEqual(None, field_descriptor.enum_type)
        self.assertFalse(field_descriptor.is_required)
        self.assertTrue(field_descriptor.has_presence)
        if api_implementation.Type() == 'cpp':
            # For test coverage only
            self.assertEqual(field_descriptor.id, field_descriptor.id)

    def CheckDescriptorSequence(self, sequence):
        # Verifies that a property like 'messageDescriptor.fields' has all the
        # properties of an immutable abc.Sequence.
        self.assertNotEqual(sequence,
                            unittest_pb2.TestAllExtensions.DESCRIPTOR.fields)
        self.assertNotEqual(sequence, [])
        self.assertNotEqual(sequence, 1)
        self.assertFalse(sequence == 1)  # Only for cpp test coverage
        self.assertEqual(sequence, sequence)
        expected_list = list(sequence)
        self.assertEqual(expected_list, sequence)
        self.assertGreater(len(sequence), 0)  # Sized
        self.assertEqual(len(sequence), len(expected_list))  # Iterable
        self.assertEqual(sequence[len(sequence) - 1], sequence[-1])
        item = sequence[0]
        self.assertEqual(item, sequence[0])
        self.assertIn(item, sequence)  # Container
        self.assertEqual(sequence.index(item), 0)
        self.assertEqual(sequence.count(item), 1)
        other_item = unittest_pb2.NestedTestAllTypes.DESCRIPTOR.fields[0]
        self.assertNotIn(other_item, sequence)
        self.assertEqual(sequence.count(other_item), 0)
        self.assertRaises(ValueError, sequence.index, other_item)
        self.assertRaises(ValueError, sequence.index, [])
        reversed_iterator = reversed(sequence)
        self.assertEqual(list(reversed_iterator), list(sequence)[::-1])
        self.assertRaises(StopIteration, next, reversed_iterator)
        # Mutating the snapshot list must not affect the live sequence.
        expected_list[0] = 'change value'
        self.assertNotEqual(expected_list, sequence)
        # TODO: Change __repr__ support for DescriptorSequence.
        if api_implementation.Type() == 'python':
            self.assertEqual(str(list(sequence)), str(sequence))
        else:
            self.assertEqual(str(sequence)[0], '<')

    def CheckDescriptorMapping(self, mapping):
        # Verifies that a property like 'messageDescriptor.fields' has all the
        # properties of an immutable abc.Mapping.
        iterated_keys = []
        for key in mapping:
            iterated_keys.append(key)
        self.assertEqual(len(iterated_keys), len(mapping))
        self.assertEqual(set(iterated_keys), set(mapping.keys()))

        self.assertNotEqual(
            mapping, unittest_pb2.TestAllExtensions.DESCRIPTOR.fields_by_name)
        self.assertNotEqual(mapping, {})
        self.assertNotEqual(mapping, 1)
        self.assertFalse(mapping == 1)  # Only for cpp test coverage
        excepted_dict = dict(mapping.items())
        self.assertEqual(mapping, excepted_dict)
        self.assertEqual(mapping, mapping)
        self.assertGreater(len(mapping), 0)  # Sized
        self.assertEqual(len(mapping), len(excepted_dict))  # Iterable
        key, item = next(iter(mapping.items()))
        self.assertIn(key, mapping)  # Container
        self.assertEqual(mapping.get(key), item)
        with self.assertRaises(TypeError):
            mapping.get()
        # TODO: Fix python and cpp extension diff.
        if api_implementation.Type() == 'cpp':
            self.assertEqual(None, mapping.get([]))
        else:
            self.assertRaises(TypeError, mapping.get, [])
        with self.assertRaises(TypeError):
            if [] in mapping:
                pass
        with self.assertRaises(TypeError):
            _ = mapping[[]]
        # keys(), iterkeys() &co
        item = (next(iter(mapping.keys())), next(iter(mapping.values())))
        self.assertEqual(item, next(iter(mapping.items())))
        # Mutating the snapshot dict must not affect the live mapping.
        excepted_dict[key] = 'change value'
        self.assertNotEqual(mapping, excepted_dict)
        del excepted_dict[key]
        excepted_dict['new_key'] = 'new'
        self.assertNotEqual(mapping, excepted_dict)
        self.assertRaises(KeyError, mapping.__getitem__, 'key_error')
        self.assertRaises(KeyError, mapping.__getitem__, len(mapping) * 2)
        # TODO: Add __repr__ support for DescriptorMapping.
        if api_implementation.Type() == 'cpp':
            self.assertEqual(str(mapping)[0], '<')
        else:
            print(str(dict(mapping.items()))[:100])
            print(str(mapping)[:100])
            self.assertEqual(len(str(dict(mapping.items()))), len(str(mapping)))

    def testDescriptor(self):
        message_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
        self.CheckMessageDescriptor(message_descriptor)
        field_descriptor = message_descriptor.fields_by_name['optional_int32']
        self.CheckFieldDescriptor(field_descriptor)
        field_descriptor = message_descriptor.fields_by_camelcase_name[
            'optionalInt32']
        self.CheckFieldDescriptor(field_descriptor)
        enum_descriptor = unittest_pb2.DESCRIPTOR.enum_types_by_name[
            'ForeignEnum']
        self.assertEqual(None, enum_descriptor.containing_type)
        # Test extension range
        self.assertEqual(
            unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges,
            [(1, 536870912)])
        self.assertEqual(
            unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges,
            [(42, 43), (4143, 4244), (65536, 536870912)])

    def testCppDescriptorContainer(self):
        containing_file = unittest_pb2.DESCRIPTOR
        self.CheckDescriptorSequence(containing_file.dependencies)
        self.CheckDescriptorMapping(containing_file.message_types_by_name)
        self.CheckDescriptorMapping(containing_file.enum_types_by_name)
        self.CheckDescriptorMapping(containing_file.services_by_name)
        self.CheckDescriptorMapping(containing_file.extensions_by_name)
        self.CheckDescriptorMapping(
            unittest_pb2.TestNestedExtension.DESCRIPTOR.extensions_by_name)

    def testCppDescriptorContainer_Iterator(self):
        # Same test with the iterator
        # The iterator must keep the underlying container alive even after
        # the enum descriptor reference is dropped.
        enum = unittest_pb2.TestAllTypes.DESCRIPTOR.enum_types_by_name['NestedEnum']
        values_iter = iter(enum.values)
        del enum
        self.assertEqual('FOO', next(values_iter).name)

    def testDescriptorNestedTypesContainer(self):
        message_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
        nested_message_descriptor = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR
        self.assertEqual(len(message_descriptor.nested_types), 3)
        self.assertFalse(None in message_descriptor.nested_types)
        self.assertTrue(
            nested_message_descriptor in message_descriptor.nested_types)

    def testServiceDescriptor(self):
        service_descriptor = unittest_pb2.DESCRIPTOR.services_by_name['TestService']
        self.assertEqual(service_descriptor.name, 'TestService')
        self.assertEqual(service_descriptor.methods[0].name, 'Foo')
        self.assertIs(service_descriptor.file, unittest_pb2.DESCRIPTOR)
        self.assertEqual(service_descriptor.index, 0)
        self.CheckDescriptorMapping(service_descriptor.methods_by_name)
        self.assertFalse(service_descriptor.has_options)

    def testOneofDescriptor(self):
        message_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
        oneof_descriptor = message_descriptor.oneofs_by_name['oneof_field']
        self.assertFalse(oneof_descriptor.has_options)
        self.assertEqual(message_descriptor, oneof_descriptor.containing_type)
        self.assertEqual('oneof_field', oneof_descriptor.name)
        self.assertEqual('proto2_unittest.TestAllTypes.oneof_field',
                         oneof_descriptor.full_name)
        self.assertEqual(0, oneof_descriptor.index)

    def testDescriptorSlice(self):
        # Slicing a descriptor container must behave like slicing a list.
        message_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
        nested = message_descriptor.nested_types[:]
        self.assertEqual(message_descriptor.nested_types, nested)
        fields = message_descriptor.fields
        fields_list = list(fields)
        self.assertEqual(fields_list[:], fields[:])
        self.assertEqual(fields_list[2::2], fields[2::2])
        self.assertEqual(fields_list[3:19:3], fields[3:19:3])
python | Pylons__pyramid | src/pyramid/csrf.py | {
"start": 2862,
"end": 12965
class ____:
    """CSRF storage backed by an unauthenticated cookie — the 'Double Submit
    Cookie' method from the OWASP CSRF guidelines.

    Because tokens live in a plain cookie, they can be generated and
    verified by a front-end server, which helps with scaling.

    .. versionadded:: 1.9

    .. versionchanged: 1.10
        Added the ``samesite`` option and made the default ``'Lax'``.
    """

    _token_factory = staticmethod(lambda: text_(uuid.uuid4().hex))

    def __init__(
        self,
        cookie_name='csrf_token',
        secure=False,
        httponly=False,
        domain=None,
        max_age=None,
        path='/',
        samesite='Lax',
    ):
        self.cookie_profile = CookieProfile(
            cookie_name=cookie_name,
            secure=secure,
            max_age=max_age,
            httponly=httponly,
            path=path,
            domains=[domain],
            serializer=SimpleSerializer(),
            samesite=samesite,
        )
        self.cookie_name = cookie_name

    def new_csrf_token(self, request):
        """Mint a token, expose it on the current request's cookies, and
        schedule a Set-Cookie header on the outgoing response."""
        token = self._token_factory()
        request.cookies[self.cookie_name] = token

        def _persist(request, response):
            self.cookie_profile.set_cookies(response, token)

        request.add_response_callback(_persist)
        return token

    def get_csrf_token(self, request):
        """Return the active CSRF token from the request's cookies, creating
        one if none has been issued yet."""
        token = self.cookie_profile.bind(request).get_value()
        return token if token else self.new_csrf_token(request)

    def check_csrf_token(self, request, supplied_token):
        """Return ``True`` when *supplied_token* matches the expected token,
        using a timing-safe comparison."""
        expected = bytes_(self.get_csrf_token(request))
        return not strings_differ(expected, bytes_(supplied_token))
def get_csrf_token(request):
"""Get the currently active CSRF token for the request passed, generating
a new one using ``new_csrf_token(request)`` if one does not exist. This
calls the equivalent method in the chosen CSRF protection implementation.
.. versionadded :: 1.9
"""
registry = request.registry
csrf = registry.getUtility(ICSRFStoragePolicy)
return csrf.get_csrf_token(request)
def new_csrf_token(request):
"""Generate a new CSRF token for the request passed and persist it in an
implementation defined manner. This calls the equivalent method in the
chosen CSRF protection implementation.
.. versionadded :: 1.9
"""
registry = request.registry
csrf = registry.getUtility(ICSRFStoragePolicy)
return csrf.new_csrf_token(request)
def check_csrf_token(
request, token='csrf_token', header='X-CSRF-Token', raises=True
):
"""Check the CSRF token returned by the
:class:`pyramid.interfaces.ICSRFStoragePolicy` implementation against the
value in ``request.POST.get(token)`` (if a POST request) or
``request.headers.get(header)``. If a ``token`` keyword is not supplied to
this function, the string ``csrf_token`` will be used to look up the token
in ``request.POST``. If a ``header`` keyword is not supplied to this
function, the string ``X-CSRF-Token`` will be used to look up the token in
``request.headers``.
If the value supplied by post or by header cannot be verified by the
:class:`pyramid.interfaces.ICSRFStoragePolicy`, and ``raises`` is
``True``, this function will raise an
:exc:`pyramid.exceptions.BadCSRFToken` exception. If the values differ
and ``raises`` is ``False``, this function will return ``False``. If the
CSRF check is successful, this function will return ``True``
unconditionally.
See :ref:`auto_csrf_checking` for information about how to secure your
application automatically against CSRF attacks.
.. versionadded:: 1.4a2
.. versionchanged:: 1.7a1
A CSRF token passed in the query string of the request is no longer
considered valid. It must be passed in either the request body or
a header.
.. versionchanged:: 1.9
Moved from :mod:`pyramid.session` to :mod:`pyramid.csrf` and updated
to use the configured :class:`pyramid.interfaces.ICSRFStoragePolicy` to
verify the CSRF token.
"""
supplied_token = ""
# We first check the headers for a csrf token, as that is significantly
# cheaper than checking the POST body
if header is not None:
supplied_token = request.headers.get(header, "")
# If this is a POST/PUT/etc request, then we'll check the body to see if it
# has a token. We explicitly use request.POST here because CSRF tokens
# should never appear in an URL as doing so is a security issue. We also
# explicitly check for request.POST here as we do not support sending form
# encoded data over anything but a request.POST.
if supplied_token == "" and token is not None:
supplied_token = request.POST.get(token, "")
policy = request.registry.getUtility(ICSRFStoragePolicy)
if not policy.check_csrf_token(request, text_(supplied_token)):
if raises:
raise BadCSRFToken('check_csrf_token(): Invalid token')
return False
return True
def check_csrf_origin(
request, *, trusted_origins=None, allow_no_origin=False, raises=True
):
"""
Check the ``Origin`` of the request to see if it is a cross site request or
not.
If the value supplied by the ``Origin`` or ``Referer`` header isn't one of
the trusted origins and ``raises`` is ``True``, this function will raise a
:exc:`pyramid.exceptions.BadCSRFOrigin` exception, but if ``raises`` is
``False``, this function will return ``False`` instead. If the CSRF origin
checks are successful this function will return ``True`` unconditionally.
Additional trusted origins may be added by passing a list of domain (and
ports if non-standard like ``['example.com', 'dev.example.com:8080']``) in
with the ``trusted_origins`` parameter. If ``trusted_origins`` is ``None``
(the default) this list of additional domains will be pulled from the
``pyramid.csrf_trusted_origins`` setting.
``allow_no_origin`` determines whether to return ``True`` when the
origin cannot be determined via either the ``Referer`` or ``Origin``
header. The default is ``False`` which will reject the check.
Note that this function will do nothing if ``request.scheme`` is not
``https``.
.. versionadded:: 1.7
.. versionchanged:: 1.9
Moved from :mod:`pyramid.session` to :mod:`pyramid.csrf`
.. versionchanged:: 2.0
Added the ``allow_no_origin`` option.
"""
def _fail(reason):
if raises:
raise BadCSRFOrigin("Origin checking failed - " + reason)
else:
return False
# Origin checks are only trustworthy / useful on HTTPS requests.
if request.scheme != "https":
return True
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM when we cannot make any assumptions
# about what kind of session storage is being used. So the MITM can
# circumvent the CSRF protection. This is true for any HTTP connection,
# but anyone using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
# Determine the origin of this request
origin = request.headers.get("Origin")
origin_is_referrer = False
if origin is None:
origin = request.referrer
origin_is_referrer = True
else:
# use the last origin in the list under the assumption that the
# server generally appends values and we want the origin closest
# to us
origin = origin.split(' ')[-1]
# If we can't find an origin, fail or pass immediately depending on
# ``allow_no_origin``
if not origin:
if allow_no_origin:
return True
else:
return _fail("missing Origin or Referer.")
# Determine which origins we trust, which by default will include the
# current origin.
if trusted_origins is None:
trusted_origins = aslist(
request.registry.settings.get("pyramid.csrf_trusted_origins", [])
)
if request.host_port not in {"80", "443"}:
trusted_origins.append("{0.domain}:{0.host_port}".format(request))
else:
trusted_origins.append(request.domain)
# Check "Origin: null" against trusted_origins
if not origin_is_referrer and origin == 'null':
if origin in trusted_origins:
return True
else:
return _fail("null does not match any trusted origins.")
# Parse our origin so we we can extract the required information from
# it.
originp = urlparse(origin)
# Ensure that our Referer is also secure.
if originp.scheme != "https":
return _fail("Origin is insecure while host is secure.")
# Actually check to see if the request's origin matches any of our
# trusted origins.
if not any(
is_same_domain(originp.netloc, host) for host in trusted_origins
):
return _fail(f"{origin} does not match any trusted origins.")
return True
| CookieCSRFStoragePolicy |
python | fsspec__filesystem_spec | fsspec/caching.py | {
"start": 15924,
"end": 19479
} | class ____(BaseCache):
"""Cache which holds data in a in-memory bytes object
Implements read-ahead by the block size, for semi-random reads progressing
through the file.
Parameters
----------
trim: bool
As we read more data, whether to discard the start of the buffer when
we are more than a blocksize ahead of it.
"""
name: ClassVar[str] = "bytes"
def __init__(
self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
) -> None:
super().__init__(blocksize, fetcher, size)
self.cache = b""
self.start: int | None = None
self.end: int | None = None
self.trim = trim
def _fetch(self, start: int | None, end: int | None) -> bytes:
# TODO: only set start/end after fetch, in case it fails?
# is this where retry logic might go?
if start is None:
start = 0
if end is None:
end = self.size
if start >= self.size or start >= end:
return b""
if (
self.start is not None
and start >= self.start
and self.end is not None
and end < self.end
):
# cache hit: we have all the required data
offset = start - self.start
self.hit_count += 1
return self.cache[offset : offset + end - start]
if self.blocksize:
bend = min(self.size, end + self.blocksize)
else:
bend = end
if bend == start or start > self.size:
return b""
if (self.start is None or start < self.start) and (
self.end is None or end > self.end
):
# First read, or extending both before and after
self.total_requested_bytes += bend - start
self.miss_count += 1
self.cache = self.fetcher(start, bend)
self.start = start
else:
assert self.start is not None
assert self.end is not None
self.miss_count += 1
if start < self.start:
if self.end is None or self.end - end > self.blocksize:
self.total_requested_bytes += bend - start
self.cache = self.fetcher(start, bend)
self.start = start
else:
self.total_requested_bytes += self.start - start
new = self.fetcher(start, self.start)
self.start = start
self.cache = new + self.cache
elif self.end is not None and bend > self.end:
if self.end > self.size:
pass
elif end - self.end > self.blocksize:
self.total_requested_bytes += bend - start
self.cache = self.fetcher(start, bend)
self.start = start
else:
self.total_requested_bytes += bend - self.end
new = self.fetcher(self.end, bend)
self.cache = self.cache + new
self.end = self.start + len(self.cache)
offset = start - self.start
out = self.cache[offset : offset + end - start]
if self.trim:
num = (self.end - self.start) // (self.blocksize + 1)
if num > 1:
self.start += self.blocksize * num
self.cache = self.cache[self.blocksize * num :]
return out
def __len__(self) -> int:
return len(self.cache)
| BytesCache |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_glue.py | {
"start": 27521,
"end": 32905
} | class ____:
RUN_ID = "1234567890"
DATA_SOURCE = {"GlueTable": {"DatabaseName": "TestDB", "TableName": "TestTable"}}
ROLE = "role_arn"
@pytest.fixture
def mock_conn(self) -> Generator[BaseAwsConnection, None, None]:
with mock.patch.object(GlueDataQualityHook, "conn") as _conn:
_conn.start_data_quality_rule_recommendation_run.return_value = {"RunId": self.RUN_ID}
yield _conn
@pytest.fixture
def glue_data_quality_hook(self) -> Generator[GlueDataQualityHook, None, None]:
with mock_aws():
hook = GlueDataQualityHook(aws_conn_id="aws_default")
yield hook
def setup_method(self):
self.operator = GlueDataQualityRuleRecommendationRunOperator(
task_id="start_recommendation_run",
datasource=self.DATA_SOURCE,
role=self.ROLE,
show_results=False,
recommendation_run_kwargs={"CreatedRulesetName": "test-ruleset"},
)
self.operator.defer = mock.MagicMock()
def test_init(self):
assert self.operator.datasource == self.DATA_SOURCE
assert self.operator.role == self.ROLE
assert self.operator.show_results is False
assert self.operator.recommendation_run_kwargs == {"CreatedRulesetName": "test-ruleset"}
@mock.patch.object(GlueDataQualityHook, "conn")
def test_start_data_quality_rule_recommendation_run(self, glue_data_quality_mock_conn):
self.op = GlueDataQualityRuleRecommendationRunOperator(
task_id="start_recommendation_run",
datasource=self.DATA_SOURCE,
role=self.ROLE,
number_of_workers=10,
timeout=1000,
recommendation_run_kwargs={"CreatedRulesetName": "test-ruleset"},
)
self.op.wait_for_completion = False
self.op.execute({})
glue_data_quality_mock_conn.start_data_quality_rule_recommendation_run.assert_called_once_with(
DataSource=self.DATA_SOURCE,
Role=self.ROLE,
NumberOfWorkers=10,
Timeout=1000,
CreatedRulesetName="test-ruleset",
)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_start_data_quality_rule_recommendation_run_failed(self, glue_data_quality_mock_conn):
created_ruleset_name = "test-ruleset"
error_message = f"Ruleset {created_ruleset_name} already exists"
err_response = {"Error": {"Code": "InvalidInputException", "Message": error_message}}
exception = boto3.client("glue").exceptions.ClientError(
err_response, "StartDataQualityRuleRecommendationRun"
)
returned_exception = type(exception)
glue_data_quality_mock_conn.exceptions.InvalidInputException = returned_exception
glue_data_quality_mock_conn.start_data_quality_rule_recommendation_run.side_effect = exception
operator = GlueDataQualityRuleRecommendationRunOperator(
task_id="stat_recommendation_run",
datasource=self.DATA_SOURCE,
role=self.ROLE,
recommendation_run_kwargs={"CreatedRulesetName": created_ruleset_name},
)
operator.wait_for_completion = False
with pytest.raises(
AirflowException,
match=f"AWS Glue data quality recommendation run failed: Ruleset {created_ruleset_name} already exists",
):
operator.execute({})
@pytest.mark.parametrize(
("wait_for_completion", "deferrable"),
[
pytest.param(False, False, id="no_wait"),
pytest.param(True, False, id="wait"),
pytest.param(False, True, id="defer"),
],
)
@mock.patch.object(GlueDataQualityHook, "get_waiter")
def test_start_data_quality_rule_recommendation_run_wait_combinations(
self, _, wait_for_completion, deferrable, mock_conn, glue_data_quality_hook
):
self.operator.wait_for_completion = wait_for_completion
self.operator.deferrable = deferrable
response = self.operator.execute({})
assert response == self.RUN_ID
assert glue_data_quality_hook.get_waiter.call_count == wait_for_completion
assert self.operator.defer.call_count == deferrable
def test_template_fields(self):
validate_template_fields(self.operator)
def test_overwritten_conn_passed_to_hook(self):
OVERWRITTEN_CONN = "new-conn-id"
op = GlueDataQualityRuleRecommendationRunOperator(
task_id="test_overwritten_conn_passed_to_hook",
datasource=self.DATA_SOURCE,
role=self.ROLE,
number_of_workers=10,
timeout=1000,
recommendation_run_kwargs={"CreatedRulesetName": "test-ruleset"},
aws_conn_id=OVERWRITTEN_CONN,
)
assert op.hook.aws_conn_id == OVERWRITTEN_CONN
def test_default_conn_passed_to_hook(self):
DEFAULT_CONN = "aws_default"
op = GlueDataQualityRuleRecommendationRunOperator(
task_id="test_default_conn_passed_to_hook",
datasource=self.DATA_SOURCE,
role=self.ROLE,
number_of_workers=10,
timeout=1000,
recommendation_run_kwargs={"CreatedRulesetName": "test-ruleset"},
)
assert op.hook.aws_conn_id == DEFAULT_CONN
| TestGlueDataQualityRuleRecommendationRunOperator |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/sentry_app_webhook_requests.py | {
"start": 1110,
"end": 2158
} | class ____(serializers.Serializer):
date_format = "%Y-%m-%d %H:%M:%S"
eventType = serializers.ChoiceField(
choices=EXTENDED_VALID_EVENTS,
required=False,
)
errorsOnly = serializers.BooleanField(default=False, required=False)
organizationSlug = serializers.CharField(required=False)
start = serializers.DateTimeField(
format=date_format,
default=datetime.now(tz=timezone.utc) - timedelta(days=30),
default_timezone=timezone.utc,
required=False,
)
end = serializers.DateTimeField(
format=date_format, default=None, default_timezone=timezone.utc, required=False
)
def validate(self, data):
if "start" in data and "end" in data and data["start"] > data["end"]:
raise serializers.ValidationError("Invalid timestamp (start must be before end).")
return data
def validate_end(self, end):
if end is None:
end = datetime.now(tz=timezone.utc)
return end
@control_silo_endpoint
| IncomingRequestSerializer |
python | sympy__sympy | sympy/geometry/entity.py | {
"start": 1746,
"end": 16828
} | class ____(Basic, EvalfMixin):
"""The base class for all geometrical entities.
This class does not represent any particular geometric entity, it only
provides the implementation of some methods common to all subclasses.
"""
__slots__: tuple[str, ...] = ()
def __contains__(self, other):
"""Subclasses should implement this method for anything more complex than equality."""
if type(self) is type(other):
return self == other
raise NotImplementedError()
def __getnewargs__(self):
"""Returns a tuple that will be passed to __new__ on unpickling."""
return tuple(self.args)
def __ne__(self, o):
"""Test inequality of two geometrical entities."""
return not self == o
def __new__(cls, *args, **kwargs):
# Points are sequences, but they should not
# be converted to Tuples, so use this detection function instead.
def is_seq_and_not_point(a):
# we cannot use isinstance(a, Point) since we cannot import Point
if hasattr(a, 'is_Point') and a.is_Point:
return False
return is_sequence(a)
args = [Tuple(*a) if is_seq_and_not_point(a) else sympify(a) for a in args]
return Basic.__new__(cls, *args)
def __radd__(self, a):
"""Implementation of reverse add method."""
return a.__add__(self)
def __rtruediv__(self, a):
"""Implementation of reverse division method."""
return a.__truediv__(self)
def __repr__(self):
"""String representation of a GeometryEntity that can be evaluated
by sympy."""
return type(self).__name__ + repr(self.args)
def __rmul__(self, a):
"""Implementation of reverse multiplication method."""
return a.__mul__(self)
def __rsub__(self, a):
"""Implementation of reverse subtraction method."""
return a.__sub__(self)
def __str__(self):
"""String representation of a GeometryEntity."""
return type(self).__name__ + sstr(self.args)
def _eval_subs(self, old, new):
from sympy.geometry.point import Point, Point3D
if is_sequence(old) or is_sequence(new):
if isinstance(self, Point3D):
old = Point3D(old)
new = Point3D(new)
else:
old = Point(old)
new = Point(new)
return self._subs(old, new)
def _repr_svg_(self):
"""SVG representation of a GeometryEntity suitable for IPython"""
try:
bounds = self.bounds
except (NotImplementedError, TypeError):
# if we have no SVG representation, return None so IPython
# will fall back to the next representation
return None
if not all(x.is_number and x.is_finite for x in bounds):
return None
svg_top = '''<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
width="{1}" height="{2}" viewBox="{0}"
preserveAspectRatio="xMinYMin meet">
<defs>
<marker id="markerCircle" markerWidth="8" markerHeight="8"
refx="5" refy="5" markerUnits="strokeWidth">
<circle cx="5" cy="5" r="1.5" style="stroke: none; fill:#000000;"/>
</marker>
<marker id="markerArrow" markerWidth="13" markerHeight="13" refx="2" refy="4"
orient="auto" markerUnits="strokeWidth">
<path d="M2,2 L2,6 L6,4" style="fill: #000000;" />
</marker>
<marker id="markerReverseArrow" markerWidth="13" markerHeight="13" refx="6" refy="4"
orient="auto" markerUnits="strokeWidth">
<path d="M6,2 L6,6 L2,4" style="fill: #000000;" />
</marker>
</defs>'''
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = map(N, bounds)
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = xmin - .5, ymin -.5, xmax + .5, ymax + .5
else:
# Expand bounds by a fraction of the data ranges
expand = 0.1 # or 10%; this keeps arrowheads in view (R plots use 4%)
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100., dx]), 300])
height = min([max([100., dy]), 300])
scale_factor = 1. if max(width, height) == 0 else max(dx, dy) / max(width, height)
try:
svg = self._svg(scale_factor)
except (NotImplementedError, TypeError):
# if we have no SVG representation, return None so IPython
# will fall back to the next representation
return None
view_box = "{} {} {} {}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{})".format(ymax + ymin)
svg_top = svg_top.format(view_box, width, height)
return svg_top + (
'<g transform="{}">{}</g></svg>'
).format(transform, svg)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the GeometryEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
raise NotImplementedError()
def _sympy_(self):
return self
@property
def ambient_dimension(self):
"""What is the dimension of the space that the object is contained in?"""
raise NotImplementedError()
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
raise NotImplementedError()
def encloses(self, o):
"""
Return True if o is inside (not on or outside) the boundaries of self.
The object will be decomposed into Points and individual Entities need
only define an encloses_point method for their class.
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
sympy.geometry.polygon.Polygon.encloses_point
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t2 = Polygon(*RegularPolygon(Point(0, 0), 2, 3).vertices)
>>> t2.encloses(t)
True
>>> t.encloses(t2)
False
"""
from sympy.geometry.point import Point
from sympy.geometry.line import Segment, Ray, Line
from sympy.geometry.ellipse import Ellipse
from sympy.geometry.polygon import Polygon, RegularPolygon
if isinstance(o, Point):
return self.encloses_point(o)
elif isinstance(o, Segment):
return all(self.encloses_point(x) for x in o.points)
elif isinstance(o, (Ray, Line)):
return False
elif isinstance(o, Ellipse):
return self.encloses_point(o.center) and \
self.encloses_point(
Point(o.center.x + o.hradius, o.center.y)) and \
not self.intersection(o)
elif isinstance(o, Polygon):
if isinstance(o, RegularPolygon):
if not self.encloses_point(o.center):
return False
return all(self.encloses_point(v) for v in o.vertices)
raise NotImplementedError()
def equals(self, o):
return self == o
def intersection(self, o):
"""
Returns a list of all of the intersections of self with o.
Notes
=====
An entity is not required to implement this method.
If two different types of entities can intersect, the item with
higher index in ordering_of_classes should implement
intersections with anything having a lower index.
See Also
========
sympy.geometry.util.intersection
"""
raise NotImplementedError()
def is_similar(self, other):
"""Is this geometrical entity similar to another geometrical entity?
Two entities are similar if a uniform scaling (enlarging or
shrinking) of one of the entities will allow one to obtain the other.
Notes
=====
This method is not intended to be used directly but rather
through the `are_similar` function found in util.py.
An entity is not required to implement this method.
If two different types of entities can be similar, it is only
required that one of them be able to determine this.
See Also
========
scale
"""
raise NotImplementedError()
def reflect(self, line):
"""
Reflects an object across a line.
Parameters
==========
line: Line
Examples
========
>>> from sympy import pi, sqrt, Line, RegularPolygon
>>> l = Line((0, pi), slope=sqrt(2))
>>> pent = RegularPolygon((1, 2), 1, 5)
>>> rpent = pent.reflect(l)
>>> rpent
RegularPolygon(Point2D(-2*sqrt(2)*pi/3 - 1/3 + 4*sqrt(2)/3, 2/3 + 2*sqrt(2)/3 + 2*pi/3), -1, 5, -atan(2*sqrt(2)) + 3*pi/5)
>>> from sympy import pi, Line, Circle, Point
>>> l = Line((0, pi), slope=1)
>>> circ = Circle(Point(0, 0), 5)
>>> rcirc = circ.reflect(l)
>>> rcirc
Circle(Point2D(-pi, pi), -5)
"""
from sympy.geometry.point import Point
g = self
l = line
o = Point(0, 0)
if l.slope.is_zero:
v = l.args[0].y
if not v: # x-axis
return g.scale(y=-1)
reps = [(p, p.translate(y=2*(v - p.y))) for p in g.atoms(Point)]
elif l.slope is oo:
v = l.args[0].x
if not v: # y-axis
return g.scale(x=-1)
reps = [(p, p.translate(x=2*(v - p.x))) for p in g.atoms(Point)]
else:
if not hasattr(g, 'reflect') and not all(
isinstance(arg, Point) for arg in g.args):
raise NotImplementedError(
'reflect undefined or non-Point args in %s' % g)
a = atan(l.slope)
c = l.coefficients
d = -c[-1]/c[1] # y-intercept
# apply the transform to a single point
xf = Point(x, y)
xf = xf.translate(y=-d).rotate(-a, o).scale(y=-1
).rotate(a, o).translate(y=d)
# replace every point using that transform
reps = [(p, xf.xreplace({x: p.x, y: p.y})) for p in g.atoms(Point)]
return g.xreplace(dict(reps))
def rotate(self, angle, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
The default pt is the origin, Point(0, 0)
See Also
========
scale, translate
Examples
========
>>> from sympy import Point, RegularPolygon, Polygon, pi
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t # vertex on x axis
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.rotate(pi/2) # vertex on y axis now
Triangle(Point2D(0, 1), Point2D(-sqrt(3)/2, -1/2), Point2D(sqrt(3)/2, -1/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.rotate(angle, pt))
else:
newargs.append(a)
return type(self)(*newargs)
def scale(self, x=1, y=1, pt=None):
"""Scale the object by multiplying the x,y-coordinates by x and y.
If pt is given, the scaling is done relative to that point; the
object is shifted by -pt, scaled, and shifted by pt.
See Also
========
rotate, translate
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.scale(2)
Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)/2), Point2D(-1, -sqrt(3)/2))
>>> t.scale(2, 2)
Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)), Point2D(-1, -sqrt(3)))
"""
from sympy.geometry.point import Point
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
return type(self)(*[a.scale(x, y) for a in self.args]) # if this fails, override this class
def translate(self, x=0, y=0):
"""Shift the object by adding to the x,y-coordinates the values x and y.
See Also
========
rotate, scale
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.translate(2)
Triangle(Point2D(3, 0), Point2D(3/2, sqrt(3)/2), Point2D(3/2, -sqrt(3)/2))
>>> t.translate(2, 2)
Triangle(Point2D(3, 2), Point2D(3/2, sqrt(3)/2 + 2), Point2D(3/2, 2 - sqrt(3)/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.translate(x, y))
else:
newargs.append(a)
return self.func(*newargs)
def parameter_value(self, other, t):
"""Return the parameter corresponding to the given point.
Evaluating an arbitrary point of the entity at this parameter
value will return the given point.
Examples
========
>>> from sympy import Line, Point
>>> from sympy.abc import t
>>> a = Point(0, 0)
>>> b = Point(2, 2)
>>> Line(a, b).parameter_value((1, 1), t)
{t: 1/2}
>>> Line(a, b).arbitrary_point(t).subs(_)
Point2D(1, 1)
"""
from sympy.geometry.point import Point
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
sol = solve(self.arbitrary_point(T) - other, T, dict=True)
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return {t: sol[0][T]}
| GeometryEntity |
python | realpython__materials | python-mutable-immutable/immutable.py | {
"start": 0,
"end": 299
} | class ____:
def __init__(self, value):
super().__setattr__("value", value)
def __setattr__(self, name, attr_value):
raise AttributeError(f"can't set attribute '{name}'")
def __delattr__(self, name):
raise AttributeError(f"can't delete attribute '{name}'")
| Immutable |
python | pytest-dev__pytest | testing/test_recwarn.py | {
"start": 4335,
"end": 8751
} | class ____:
"""test pytest.deprecated_call()"""
def dep(self, i: int, j: int | None = None) -> int:
if i == 0:
warnings.warn("is deprecated", DeprecationWarning, stacklevel=1)
return 42
def dep_explicit(self, i: int) -> None:
if i == 0:
warnings.warn_explicit(
"dep_explicit", category=DeprecationWarning, filename="hello", lineno=3
)
def test_deprecated_call_raises(self) -> None:
with pytest.raises(pytest.fail.Exception, match="No warnings of type"):
pytest.deprecated_call(self.dep, 3, 5)
def test_deprecated_call(self) -> None:
pytest.deprecated_call(self.dep, 0, 5)
def test_deprecated_call_ret(self) -> None:
ret = pytest.deprecated_call(self.dep, 0)
assert ret == 42
def test_deprecated_call_preserves(self) -> None:
# Type ignored because `onceregistry` and `filters` are not
# documented API.
onceregistry = warnings.onceregistry.copy() # type: ignore
filters = warnings.filters[:]
warn = warnings.warn
warn_explicit = warnings.warn_explicit
self.test_deprecated_call_raises()
self.test_deprecated_call()
assert onceregistry == warnings.onceregistry # type: ignore
assert filters == warnings.filters
assert warn is warnings.warn
assert warn_explicit is warnings.warn_explicit
def test_deprecated_explicit_call_raises(self) -> None:
with pytest.raises(pytest.fail.Exception):
pytest.deprecated_call(self.dep_explicit, 3)
def test_deprecated_explicit_call(self) -> None:
pytest.deprecated_call(self.dep_explicit, 0)
pytest.deprecated_call(self.dep_explicit, 0)
@pytest.mark.parametrize("mode", ["context_manager", "call"])
def test_deprecated_call_no_warning(self, mode) -> None:
"""Ensure deprecated_call() raises the expected failure when its block/function does
not raise a deprecation warning.
"""
def f():
pass
msg = "No warnings of type (.*DeprecationWarning.*, .*PendingDeprecationWarning.*)"
with pytest.raises(pytest.fail.Exception, match=msg):
if mode == "call":
pytest.deprecated_call(f)
else:
with pytest.deprecated_call():
f()
@pytest.mark.parametrize(
"warning_type", [PendingDeprecationWarning, DeprecationWarning, FutureWarning]
)
@pytest.mark.parametrize("mode", ["context_manager", "call"])
@pytest.mark.parametrize("call_f_first", [True, False])
@pytest.mark.filterwarnings("ignore")
def test_deprecated_call_modes(self, warning_type, mode, call_f_first) -> None:
"""Ensure deprecated_call() captures a deprecation warning as expected inside its
block/function.
"""
def f():
warnings.warn(warning_type("hi"))
return 10
# ensure deprecated_call() can capture the warning even if it has already been triggered
if call_f_first:
assert f() == 10
if mode == "call":
assert pytest.deprecated_call(f) == 10
else:
with pytest.deprecated_call():
assert f() == 10
def test_deprecated_call_specificity(self) -> None:
other_warnings = [
Warning,
UserWarning,
SyntaxWarning,
RuntimeWarning,
ImportWarning,
UnicodeWarning,
]
for warning in other_warnings:
def f():
warnings.warn(warning("hi")) # noqa: B023
with pytest.warns(warning):
with pytest.raises(pytest.fail.Exception):
pytest.deprecated_call(f)
with pytest.raises(pytest.fail.Exception):
with pytest.deprecated_call():
f()
def test_deprecated_call_supports_match(self) -> None:
with pytest.deprecated_call(match=r"must be \d+$"):
warnings.warn("value must be 42", DeprecationWarning)
with pytest.deprecated_call():
with pytest.raises(pytest.fail.Exception, match="DID NOT WARN"):
with pytest.deprecated_call(match=r"must be \d+$"):
warnings.warn("this is not here", DeprecationWarning)
| TestDeprecatedCall |
python | keras-team__keras | keras/src/ops/operation_test.py | {
"start": 2649,
"end": 3056
} | class ____(operation.Operation):
def __init__(self, alpha, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
def call(self, x):
return self.alpha * x
def compute_output_spec(self, x):
return keras_tensor.KerasTensor(x.shape, x.dtype)
def get_config(self):
return {**super().get_config(), "alpha": self.alpha}
| OpWithKwargsInConstructorGetConfig |
python | HIPS__autograd | autograd/builtins.py | {
"start": 5816,
"end": 6257
} | class ____(ContainerVSpace):
def _values(self, x):
return x.values()
def _kv_pairs(self, x):
return x.items()
def _map(self, f, *args):
return {k: f(vs, *[x[k] for x in args]) for k, vs in self.shape.items()}
def _subval(self, xs, idx, x):
d = dict(xs.items())
d[idx] = x
return d
ListVSpace.register(list_)
TupleVSpace.register(tuple_)
DictVSpace.register(dict_)
| DictVSpace |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 99706,
"end": 104510
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("hyperparameter_tuning_job.types.HyperparameterTuningJob.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("hyperparameter_tuning_job.HyperparameterTuningJobHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = CreateHyperparameterTuningJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
worker_pool_specs=[],
parameter_spec={},
metric_spec={},
max_trial_count=15,
parallel_trial_count=3,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.create_hyperparameter_tuning_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
metric_spec={},
parameter_spec={},
max_trial_count=15,
parallel_trial_count=3,
worker_pool_specs=[],
base_output_dir=None,
custom_job_labels=None,
custom_job_encryption_spec_key_name=None,
staging_bucket=STAGING_BUCKET,
max_failed_trial_count=0,
search_algorithm=None,
measurement_selection="best",
hyperparameter_tuning_job_labels=None,
hyperparameter_tuning_job_encryption_spec_key_name=None,
service_account=None,
network=None,
timeout=None,
restart_job_on_worker_restart=False,
enable_web_access=False,
tensorboard=None,
sync=False,
wait_job_completed=False,
)
@mock.patch(
VERTEX_AI_PATH.format("hyperparameter_tuning_job.CreateHyperparameterTuningJobOperator.defer")
)
@mock.patch(VERTEX_AI_PATH.format("hyperparameter_tuning_job.HyperparameterTuningJobHook"))
def test_deferrable(self, mock_hook, mock_defer):
op = CreateHyperparameterTuningJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
worker_pool_specs=[],
parameter_spec={},
metric_spec={},
max_trial_count=15,
parallel_trial_count=3,
deferrable=True,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_defer.assert_called_once()
@mock.patch(VERTEX_AI_PATH.format("hyperparameter_tuning_job.HyperparameterTuningJobHook"))
def test_execute_complete(self, mock_hook):
test_job_id = "test_job_id"
test_job = {"name": f"test/{test_job_id}"}
event = {
"status": "success",
"message": "test message",
"job": test_job,
}
mock_hook.return_value.extract_hyperparameter_tuning_job_id.return_value = test_job_id
mock_context = mock.MagicMock()
op = CreateHyperparameterTuningJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
worker_pool_specs=[],
parameter_spec={},
metric_spec={},
max_trial_count=15,
parallel_trial_count=3,
)
result = op.execute_complete(context=mock_context, event=event)
assert result == test_job
def test_execute_complete_error(self):
event = {
"status": "error",
"message": "test error message",
}
op = CreateHyperparameterTuningJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
worker_pool_specs=[],
parameter_spec={},
metric_spec={},
max_trial_count=15,
parallel_trial_count=3,
)
with pytest.raises(AirflowException):
op.execute_complete(context=mock.MagicMock(), event=event)
| TestVertexAICreateHyperparameterTuningJobOperator |
python | ray-project__ray | release/llm_tests/serve/probes/query_utils.py | {
"start": 4849,
"end": 5398
} | class ____:
def __init__(
self,
client: openai.AsyncClient,
retryable_error_types: Sequence[Type[APIStatusError]] = None,
):
assert not client or isinstance(
client, openai.AsyncClient
), "Async OpenAI client is expected!"
self.client: openai.AsyncClient = client
self.retryable_error_types: Sequence[Type[APIStatusError]] = (
retryable_error_types
if retryable_error_types is not None
else DEFAULT_RETRYABLE_EXCEPTIONS
)
| BaseProbe |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_memusage.py | {
"start": 47008,
"end": 48651
} | class ____(fixtures.TestBase):
@testing.fixture
def user_fixture(self, decl_base):
class User(decl_base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(50))
decl_base.metadata.create_all(testing.db)
yield User
@testing.requires.predictable_gc
def test_gced_delete_on_rollback(self, user_fixture):
User = user_fixture
s = fixture_session()
u1 = User(name="ed")
s.add(u1)
s.commit()
s.delete(u1)
u1_state = attributes.instance_state(u1)
assert u1_state in s.identity_map.all_states()
assert u1_state in s._deleted
s.flush()
assert u1_state not in s.identity_map.all_states()
assert u1_state not in s._deleted
del u1
gc_collect()
gc_collect()
gc_collect()
assert u1_state.obj() is None
s.rollback()
# new in 1.1, not in identity map if the object was
# gc'ed and we restore snapshot; we've changed update_impl
# to just skip this object
assert u1_state not in s.identity_map.all_states()
# in any version, the state is replaced by the query
# because the identity map would switch it
u1 = s.query(User).filter_by(name="ed").one()
assert u1_state not in s.identity_map.all_states()
eq_(s.scalar(select(func.count("*")).select_from(User.__table__)), 1)
s.delete(u1)
s.flush()
eq_(s.scalar(select(func.count("*")).select_from(User.__table__)), 0)
s.commit()
| MiscMemoryIntensiveTests |
python | django__django | django/db/models/query_utils.py | {
"start": 1188,
"end": 8518
} | class ____(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
default = AND
conditional = True
connectors = (None, AND, OR, XOR)
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
if _connector not in self.connectors:
connector_reprs = ", ".join(f"{conn!r}" for conn in self.connectors[1:])
raise ValueError(f"_connector must be one of {connector_reprs}, or None.")
super().__init__(
children=[*args, *sorted(kwargs.items())],
connector=_connector,
negated=_negated,
)
def _combine(self, other, conn):
if getattr(other, "conditional", False) is False:
raise TypeError(other)
if not self:
return other.copy()
if not other and isinstance(other, Q):
return self.copy()
obj = self.create(connector=conn)
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
def __invert__(self):
obj = self.copy()
obj.negate()
return obj
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(
self,
reuse,
allow_joins=allow_joins,
split_subq=False,
check_filterable=False,
summarize=summarize,
)
query.promote_joins(joins)
return clause
def replace_expressions(self, replacements):
if not replacements:
return self
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
child_replacement = child
if isinstance(child, tuple):
lhs, rhs = child
if LOOKUP_SEP in lhs:
path, lookup = lhs.rsplit(LOOKUP_SEP, 1)
else:
path = lhs
lookup = None
field = models.F(path)
if (
field_replacement := field.replace_expressions(replacements)
) is not field:
# Handle the implicit __exact case by falling back to an
# extra transform when get_lookup returns no match for the
# last component of the path.
if lookup is None:
lookup = "exact"
if (lookup_class := field_replacement.get_lookup(lookup)) is None:
if (
transform_class := field_replacement.get_transform(lookup)
) is not None:
field_replacement = transform_class(field_replacement)
lookup = "exact"
lookup_class = field_replacement.get_lookup(lookup)
if rhs is None and lookup == "exact":
lookup_class = field_replacement.get_lookup("isnull")
rhs = True
if lookup_class is not None:
child_replacement = lookup_class(field_replacement, rhs)
else:
child_replacement = child.replace_expressions(replacements)
clone.children.append(child_replacement)
return clone
def flatten(self):
"""
Recursively yield this Q object and all subexpressions, in depth-first
order.
"""
yield self
for child in self.children:
if isinstance(child, tuple):
# Use the lookup.
child = child[1]
if hasattr(child, "flatten"):
yield from child.flatten()
else:
yield child
def check(self, against, using=DEFAULT_DB_ALIAS):
"""
Do a database query to check if the expressions of the Q instance
matches against the expressions.
"""
# Avoid circular imports.
from django.db.models import BooleanField, Value
from django.db.models.functions import Coalesce
from django.db.models.sql import Query
from django.db.models.sql.constants import SINGLE
query = Query(None)
for name, value in against.items():
if not hasattr(value, "resolve_expression"):
value = Value(value)
query.add_annotation(value, name, select=False)
query.add_annotation(Value(1), "_check")
connection = connections[using]
# This will raise a FieldError if a field is missing in "against".
if connection.features.supports_comparing_boolean_expr:
query.add_q(Q(Coalesce(self, True, output_field=BooleanField())))
else:
query.add_q(self)
compiler = query.get_compiler(using=using)
context_manager = (
transaction.atomic(using=using)
if connection.in_atomic_block
else nullcontext()
)
try:
with context_manager:
return compiler.execute_sql(SINGLE) is not None
except DatabaseError as e:
logger.warning("Got a database error calling check() on %r: %s", self, e)
return True
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.query_utils"):
path = path.replace("django.db.models.query_utils", "django.db.models")
args = tuple(self.children)
kwargs = {}
if self.connector != self.default:
kwargs["_connector"] = self.connector
if self.negated:
kwargs["_negated"] = True
return path, args, kwargs
@cached_property
def identity(self):
path, args, kwargs = self.deconstruct()
identity = [path, *kwargs.items()]
for child in args:
if isinstance(child, tuple):
arg, value = child
value = make_hashable(value)
identity.append((arg, value))
else:
identity.append(child)
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Q):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
@cached_property
def referenced_base_fields(self):
"""
Retrieve all base fields referenced directly or through F expressions
excluding any fields referenced through joins.
"""
# Avoid circular imports.
from django.db.models.sql import query
return {
child.split(LOOKUP_SEP, 1)[0] for child in query.get_children_from_q(self)
}
| Q |
python | pennersr__django-allauth | allauth/idp/oidc/app_settings.py | {
"start": 0,
"end": 2052
} | class ____:
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from allauth.utils import get_setting
return get_setting(self.prefix + name, dflt)
@property
def ADAPTER(self):
return self._setting(
"ADAPTER",
"allauth.idp.oidc.adapter.DefaultOIDCAdapter",
)
@property
def ID_TOKEN_EXPIRES_IN(self) -> int:
return 5 * 60
@property
def PRIVATE_KEY(self) -> str:
return self._setting("PRIVATE_KEY", "")
@property
def ACCESS_TOKEN_EXPIRES_IN(self) -> int:
return self._setting("ACCESS_TOKEN_EXPIRES_IN", 3600)
@property
def AUTHORIZATION_CODE_EXPIRES_IN(self) -> int:
return self._setting("AUTHORIZATION_CODE_EXPIRES_IN", 60)
@property
def ROTATE_REFRESH_TOKEN(self) -> bool:
return self._setting("ROTATE_REFRESH_TOKEN", True)
@property
def DEVICE_CODE_EXPIRES_IN(self) -> int:
return self._setting("DEVICE_CODE_EXPIRES_IN", 300)
@property
def DEVICE_CODE_INTERVAL(self) -> int:
return self._setting("DEVICE_CODE_INTERVAL", 5)
@property
def RATE_LIMITS(self):
rls = self._setting("RATE_LIMITS", {})
if rls is False:
return {}
ret = {
# OIDC device user code checks
"device_user_code": "5/m/ip"
}
ret.update(rls)
return ret
@property
def RP_INITIATED_LOGOUT_ASKS_FOR_OP_LOGOUT(self):
"""
At https://openid.net/specs/openid-connect-rpinitiated-1_0.html
> 2. RP-Initiated Logout':
> At the Logout Endpoint, the OP SHOULD ask the End-User whether to
> log out of the OP as well.
This setting controls whether the OP always asks.
"""
return self._setting("RP_INITIATED_LOGOUT_ASKS_FOR_OP_LOGOUT", True)
_app_settings = AppSettings("IDP_OIDC_")
def __getattr__(name):
# See https://peps.python.org/pep-0562/
return getattr(_app_settings, name)
| AppSettings |
python | python__mypy | mypyc/ir/ops.py | {
"start": 4512,
"end": 5304
} | class ____:
"""Abstract base class for all IR values.
These include references to registers, literals, and all
operations (Ops), such as assignments, calls and branches.
Values are often used as inputs of Ops. Register can be used as an
assignment target.
A Value is part of the IR being compiled if it's included in a BasicBlock
that is reachable from a FuncIR (i.e., is part of a function).
See also: Op is a subclass of Value that is the base class of all
operations.
"""
# Source line number (-1 for no/unknown line)
line = -1
# Type of the value or the result of the operation
type: RType = void_rtype
is_borrowed = False
@property
def is_void(self) -> bool:
return isinstance(self.type, RVoid)
@final
| Value |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.