language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 59376,
"end": 61354
} | class ____(_CreateBase):
_id = "create_copy_of"
_inputs = [("target", AddressT())]
@property
def _preamble_len(self):
return 11
def _add_gas_estimate(self, args, should_use_create2):
# max possible runtime length + preamble length
return _create_addl_gas_estimate(EIP_170_LIMIT + self._preamble_len, should_use_create2)
def _build_create_IR(self, expr, args, context, value, salt, revert_on_failure):
target = args[0]
# something we can pass to scope_multi
with scope_multi(
(target, value, salt), ("create_target", "create_value", "create_salt")
) as (b1, (target, value, salt)):
codesize = IRnode.from_list(["extcodesize", target])
msize = IRnode.from_list(["msize"])
with scope_multi((codesize, msize), ("target_codesize", "mem_ofst")) as (
b2,
(codesize, mem_ofst),
):
ir = ["seq"]
# make sure there is actually code at the target
check_codesize = ["assert", codesize]
ir.append(
IRnode.from_list(check_codesize, error_msg="empty target (create_copy_of)")
)
# store the preamble at msize + 22 (zero padding)
preamble, preamble_len = _create_preamble(codesize)
assert preamble_len == self._preamble_len
ir.append(["mstore", mem_ofst, preamble])
# copy the target code into memory. current layout:
# msize | 00...00 (22 0's) | preamble | bytecode
ir.append(["extcodecopy", target, add_ofst(mem_ofst, 32), 0, codesize])
buf = add_ofst(mem_ofst, 32 - preamble_len)
buf_len = ["add", codesize, preamble_len]
ir.append(_create_ir(value, buf, buf_len, salt, revert_on_failure))
return b1.resolve(b2.resolve(ir))
| CreateCopyOf |
python | doocs__leetcode | solution/0000-0099/0087.Scramble String/Solution.py | {
"start": 0,
"end": 479
} | class ____:
def isScramble(self, s1: str, s2: str) -> bool:
@cache
def dfs(i: int, j: int, k: int) -> bool:
if k == 1:
return s1[i] == s2[j]
for h in range(1, k):
if dfs(i, j, h) and dfs(i + h, j + h, k - h):
return True
if dfs(i + h, j, k - h) and dfs(i, j + k - h, h):
return True
return False
return dfs(0, 0, len(s1))
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 458334,
"end": 458660
} | class ____(sgqlc.types.Interface):
"""Entities that can be updated."""
__schema__ = github_schema
__field_names__ = ("viewer_can_update",)
viewer_can_update = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanUpdate")
"""Check if the current viewer can update this object."""
| Updatable |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/query_metrics/query_template_values.py | {
"start": 713,
"end": 4918
} | class ____(QueryMetricProvider):
metric_name = "query.template_values"
value_keys = (
"template_dict",
"query",
)
@classmethod
def get_query(cls, query, template_dict, selectable) -> str:
template_dict_reformatted = {
k: str(v).format(batch=selectable)
if isinstance(v, numbers.Number)
else v.format(batch=selectable)
for k, v in template_dict.items()
}
query_reformatted = query.format(**template_dict_reformatted, batch=selectable)
return query_reformatted
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
) -> List[dict]:
query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
selectable: Union[sa.sql.Selectable, str]
selectable, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
template_dict = metric_value_kwargs.get("template_dict")
if not isinstance(template_dict, dict):
raise TypeError("template_dict supplied by the expectation must be a dict") # noqa: TRY003 # FIXME CoP
if isinstance(selectable, sa.Table):
query = cls.get_query(query, template_dict, selectable)
elif isinstance(
selectable, get_sqlalchemy_subquery_type()
): # Specifying a runtime query in a RuntimeBatchRequest returns the active batch as a Subquery; sectioning # noqa: E501 # FIXME CoP
# the active batch off w/ parentheses ensures flow of operations doesn't break
query = cls.get_query(query, template_dict, f"({selectable})")
elif isinstance(
selectable, sa.sql.Select
): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & # noqa: E501 # FIXME CoP
# aliasing when formatting the parameterized query
query = cls.get_query(
query,
template_dict,
f"({selectable.compile(compile_kwargs={'literal_binds': True})}) AS subselect",
)
else:
query = cls.get_query(query, template_dict, f"({selectable})")
try:
result: Union[Sequence[sa.Row[Any]], Any] = execution_engine.execute_query(
sa.text(query)
).fetchall()
except Exception as e:
if hasattr(e, "_query_id"):
# query_id removed because it duplicates the validation_results
e._query_id = None
raise e # noqa: TRY201 # FIXME CoP
if isinstance(result, Sequence):
return [element._asdict() for element in result]
else:
return [result]
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
) -> List[dict]:
query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
df: pyspark.DataFrame
df, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
df.createOrReplaceTempView("tmp_view")
template_dict = metric_value_kwargs.get("template_dict")
if not isinstance(query, str):
raise TypeError("template_dict supplied by the expectation must be a dict") # noqa: TRY003 # FIXME CoP
if not isinstance(template_dict, dict):
raise TypeError("template_dict supplied by the expectation must be a dict") # noqa: TRY003 # FIXME CoP
query = query.format(**template_dict, batch="tmp_view")
engine: pyspark.SparkSession = execution_engine.spark
result: List[pyspark.Row] = engine.sql(query).collect()
return [element.asDict() for element in result]
| QueryTemplateValues |
python | pdm-project__pdm | src/pdm/formats/uv.py | {
"start": 676,
"end": 10382
} | class ____:
project: Project
requires_python: str
requirements: list[Requirement]
locked_repository: LockedRepository
stack: ExitStack = field(default_factory=ExitStack, init=False)
@cached_property
def default_source(self) -> str:
return cast(str, self.project.sources[0].url)
def __post_init__(self) -> None:
self._enter_path(self.project.root / "uv.lock")
def build_pyproject_toml(self) -> Path:
data = self.project.pyproject.open_for_read()
uv_overrides = []
for override_key, override_value in (
data.get("tool", {}).get("pdm", {}).get("resolution", {}).get("overrides", {}).items()
):
uv_overrides.append(f"{get_requirement_from_override(override_key, override_value)}")
if uv_overrides:
data.setdefault("tool", {}).setdefault("uv", {}).setdefault("override-dependencies", []).extend(
uv_overrides
)
data.setdefault("project", {})["requires-python"] = self.requires_python
data.pop("dependency-groups", None)
data.setdefault("project", {}).pop("optional-dependencies", None)
sources = {}
collected_deps: dict[str, list[str]] = {}
for dep in self.requirements:
if isinstance(dep, FileRequirement):
entry = self._get_name(dep)
sources[entry] = self._build_source(dep)
else:
entry = dep.as_line()
for group in dep.groups:
collected = collected_deps.setdefault(group, [])
if entry not in collected:
collected.append(entry)
for group, deps in collected_deps.items():
if group == "default":
data.setdefault("project", {})["dependencies"] = deps
else:
data.setdefault("project", {}).setdefault("optional-dependencies", {})[group] = deps
if sources:
data.setdefault("tool", {}).setdefault("uv", {}).setdefault("sources", {}).update(sources)
path = self._enter_path(self.project.root / "pyproject.toml")
with path.open("w", newline="", encoding="utf-8") as f:
tomlkit.dump(data, f)
return path
def _enter_path(self, path: Path) -> Path:
if path.exists():
name = tempfile.mktemp(dir=path.parent, prefix=f"{path.name}.")
backup = path.rename(name)
@self.stack.callback
def restore() -> None:
path.unlink(True)
backup.rename(path)
else:
self.stack.callback(path.unlink, True)
return path
def build_uv_lock(self, include_self: bool = False) -> Path:
locked_repo = self.locked_repository
packages: list[dict[str, Any]] = []
for key in locked_repo.packages:
if "[" in key[0]: # skip entries with extras
continue
# Merge related entries with the same name and version
related_packages = [
p for k, p in locked_repo.packages.items() if strip_extras(k[0])[0] == key[0] and k[1:] == key[1:]
]
packages.append(self._build_lock_entry(related_packages))
if name := self.project.name:
version = self.project.pyproject.metadata.get("version", "0.0.0")
this_package = {
"name": normalize_name(name),
"version": version,
"source": {"editable" if include_self else "virtual": "."},
}
dependencies: list[dict[str, Any]] = []
optional_dependencies: dict[str, list[dict[str, Any]]] = {}
for req in self.requirements:
if (dep := self._make_dependency(None, req)) is None:
continue
for group in req.groups:
if group == "default":
target_group = dependencies
else:
target_group = optional_dependencies.setdefault(group, [])
if dep not in target_group:
target_group.append(dep)
if dependencies:
this_package["dependencies"] = dependencies # type: ignore[assignment]
if optional_dependencies:
this_package["optional-dependencies"] = optional_dependencies
packages.append(this_package)
data = {"version": 1, "requires-python": self.requires_python}
if packages:
data["package"] = packages
path = self.project.root / "uv.lock"
with path.open("w", newline="", encoding="utf-8") as f:
tomlkit.dump(data, f)
return path
def _get_name(self, req: FileRequirement) -> str:
if req.key:
return req.key
can = Candidate(req).prepare(self.project.environment)
return normalize_name(can.metadata.name)
def _build_source(self, req: FileRequirement) -> dict[str, Any]:
result: dict[str, Any]
if isinstance(req, VcsRequirement):
result = {req.vcs: req.repo}
if req.ref:
result["rev"] = req.ref
elif req.path:
result = {"path": req.str_path}
else:
result = {"url": req.url}
if req.editable:
result["editable"] = True
return result
def _build_lock_source(self, req: Requirement) -> dict[str, Any]:
if isinstance(req, VcsRequirement):
return {req.vcs: f"{req.repo}?rev={req.ref}#{req.revision}"}
elif isinstance(req, FileRequirement):
if req.editable:
return {"editable": req.str_path}
else:
return {"url": req.url}
else:
return {"registry": self.default_source}
def _build_lock_entry(self, packages: list[Package]) -> dict[str, Any]:
packages.sort(key=lambda x: len(x.candidate.req.extras or []))
candidate = packages[0].candidate
req = candidate.req
result: dict[str, Any] = {
"name": candidate.name,
"version": candidate.version,
"source": self._build_lock_source(req),
}
for file_hash in candidate.hashes:
filename = file_hash.get("url", file_hash.get("file", ""))
is_wheel = filename.endswith(".whl")
item = {"url": file_hash.get("url", filename), "hash": file_hash["hash"]}
if is_wheel:
result.setdefault("wheels", []).append(item)
else:
result["sdist"] = item
optional_dependencies: dict[str, list[dict[str, Any]]] = {}
for package in packages:
if package.dependencies is None:
continue
if not package.candidate.req.extras:
deps = [
self._make_dependency(package.candidate, parse_requirement(dep)) for dep in package.dependencies
]
result["dependencies"] = [dep for dep in deps if dep is not None]
else:
deps = [
self._make_dependency(package.candidate, parse_requirement(dep))
for dep in package.dependencies
if parse_requirement(dep).key != candidate.req.key
]
deps = [dep for dep in deps if dep is not None]
for extra in package.candidate.req.extras:
# XXX: when depending on a package with extras, the extra dependencies are encoded in
# the corresponding group under optional-dependencies. But in case multiple extras are requested,
# the same dependencies get duplicated in those groups, but it's okay if each single extra is
# never requested alone.
if extra not in optional_dependencies:
optional_dependencies[extra] = deps # type: ignore[assignment]
if optional_dependencies:
result["optional-dependencies"] = optional_dependencies
return result
def _make_dependency(self, parent: Candidate | None, req: Requirement) -> dict[str, Any] | None:
locked_repo = self.locked_repository
parent_marker = req.marker or get_marker("")
if parent is not None:
parent_marker &= parent.req.marker or get_marker("")
matching_entries = [e for k, e in locked_repo.packages.items() if k[0] == req.key]
def marker_match(marker: Marker | None) -> bool:
return not (parent_marker & (marker or get_marker(""))).is_empty()
if not matching_entries:
return None
result: dict[str, Any] = {}
if len(matching_entries) == 1:
candidate = matching_entries[0].candidate
multiple = False
else:
candidate = next(e.candidate for e in matching_entries if marker_match(e.candidate.req.marker))
multiple = True
result.update({"name": candidate.name})
if multiple:
result.update(version=candidate.version, source=self._build_lock_source(candidate.req))
if req.extras:
result["extra"] = list(req.extras)
if req.marker is not None:
result["marker"] = str(req.marker)
return result
@contextmanager
def uv_file_builder(
project: Project, requires_python: str, requirements: list[Requirement], locked_repository: LockedRepository
) -> Iterator[_UvFileBuilder]:
builder = _UvFileBuilder(project, requires_python, requirements, locked_repository)
with builder.stack:
yield builder
| _UvFileBuilder |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 393011,
"end": 395986
} | class ____(TestCase):
"""Validates that the *rowwsie_min_max* quantization functions are equivalent to the ones without it."""
def test_quantize_tensor_with_min_max(self):
num_rows_list = [1, 2, 10, 100]
num_cols_list = [4, 8, 16, 32, 64, 128]
# Map of quantization bit rate to tuple of quantize function (with rowwise_min_max) and
# quantize function (without rowwise_min_max)
bit_rate_to_quant_fn: dict[
int,
tuple[
OpOverloadPacket,
OpOverloadPacket,
],
] = {
8: (
torch.ops.quantized.embedding_bag_byte_prepack_with_rowwise_min_max,
torch.ops.quantized.embedding_bag_byte_prepack,
),
4: (
torch.ops.quantized.embedding_bag_4bit_prepack_with_rowwise_min_max,
torch.ops.quantized.embedding_bag_4bit_prepack,
),
2: (
torch.ops.quantized.embedding_bag_2bit_prepack_with_rowwise_min_max,
torch.ops.quantized.embedding_bag_2bit_prepack,
),
}
for quant_fn_with_rowwise_min_max, quant_fn in bit_rate_to_quant_fn.values():
for torch_dtype in [torch.float16, torch.float32]:
for num_rows, num_cols in itertools.product(num_rows_list, num_cols_list):
weight = torch.rand(num_rows, num_cols, dtype=torch_dtype)
rowwise_min_max = torch.stack(
[weight.min(dim=1).values, weight.max(dim=1).values], dim=1
)
# Perform the quantization with rowwise_min_max
weight_quantized = quant_fn_with_rowwise_min_max(
weight, rowwise_min_max
)
assert weight_quantized.dtype == torch.uint8
# Confirm that the quantization is matching the one without rowwise_min_max
weight_quantized_no_rowwise_min_max = quant_fn(weight)
assert torch.equal(
weight_quantized, weight_quantized_no_rowwise_min_max
)
# Confirtm that incorrect rowwise_min_max will result in different quantization output
incorrect_rowwise_min_max = torch.stack(
[weight.max(dim=1).values, weight.max(dim=1).values], dim=1
)
weight_incorrectly_quantized = quant_fn_with_rowwise_min_max(
weight, incorrect_rowwise_min_max
)
assert weight_incorrectly_quantized.dtype == torch.uint8
assert not torch.equal(
weight_incorrectly_quantized, weight_quantized_no_rowwise_min_max
)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
| TestQuantizedWithMinMax |
python | google__pytype | pytype/rewrite/abstract/containers_test.py | {
"start": 424,
"end": 1084
} | class ____(BaseTest):
def test_constant_type(self):
a = self.const_var("a")
c = containers.List(self.ctx, [a])
assert_type(c.constant, list[_Var])
def test_append(self):
l1 = containers.List(self.ctx, [self.const_var("a")])
l2 = l1.append(self.const_var("b"))
self.assertEqual(l2.constant, [self.const_var("a"), self.const_var("b")])
def test_extend(self):
l1 = containers.List(self.ctx, [self.const_var("a")])
l2 = containers.List(self.ctx, [self.const_var("b")])
l3 = l1.extend(l2)
self.assertIsInstance(l3, containers.List)
self.assertEqual(l3.constant, [self.const_var("a"), self.const_var("b")])
| ListTest |
python | PyCQA__pylint | tests/functional/u/unexpected_special_method_signature.py | {
"start": 3477,
"end": 3550
} | class ____(Philosopher, default_name="Bruce"):
pass
| AustralianPhilosopher |
python | scipy__scipy | scipy/ndimage/tests/test_interpolation.py | {
"start": 22702,
"end": 40295
} | class ____:
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform01(self, order, xp):
data = xp.asarray([1])
out = ndimage.affine_transform(data, xp.asarray([[1]]), order=order)
assert_array_almost_equal(out, xp.asarray([1]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform02(self, order, xp):
data = xp.ones([4])
out = ndimage.affine_transform(data, xp.asarray([[1]]), order=order)
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 1]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform03(self, order, xp):
data = xp.ones([4])
out = ndimage.affine_transform(data, xp.asarray([[1]]), -1, order=order)
assert_array_almost_equal(out, xp.asarray([0, 1, 1, 1]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform04(self, order, xp):
data = xp.asarray([4, 1, 3, 2])
out = ndimage.affine_transform(data, xp.asarray([[1]]), -1, order=order)
assert_array_almost_equal(out, xp.asarray([0, 4, 1, 3]))
@pytest.mark.parametrize('order', range(0, 6))
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
def test_affine_transform05(self, order, dtype, xp):
dtype = getattr(xp, dtype)
data = xp.asarray([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=dtype)
expected = xp.asarray([[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]], dtype=dtype)
if xp.isdtype(data.dtype, 'complex floating'):
data -= 1j * data
expected -= 1j * expected
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
[0, -1], order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform06(self, order, xp):
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
[0, -1], order=order)
assert_array_almost_equal(out, xp.asarray([[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform07(self, order, xp):
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
[-1, 0], order=order)
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform08(self, order, xp):
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
[-1, -1], order=order)
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform09(self, order, xp):
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.affine_transform(filtered, xp.asarray([[1, 0], [0, 1]]),
[-1, -1], order=order,
prefilter=False)
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform10(self, order, xp):
data = xp.ones([2], dtype=xp.float64)
out = ndimage.affine_transform(data, xp.asarray([[0.5]]), output_shape=(4,),
order=order)
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 0]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform11(self, order, xp):
data = xp.asarray([1, 5, 2, 6, 3, 7, 4, 4])
out = ndimage.affine_transform(data, xp.asarray([[2]]), 0, (4,), order=order)
assert_array_almost_equal(out, xp.asarray([1, 2, 3, 4]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform12(self, order, xp):
data = xp.asarray([1, 2, 3, 4])
out = ndimage.affine_transform(data, xp.asarray([[0.5]]), 0, (8,), order=order)
assert_array_almost_equal(out[::2], xp.asarray([1, 2, 3, 4]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform13(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 2]]), 0, (3, 2),
order=order)
assert_array_almost_equal(out, xp.asarray([[1, 3], [5, 7], [9, 11]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform14(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[2, 0], [0, 1]]), 0, (1, 4),
order=order)
assert_array_almost_equal(out, xp.asarray([[1, 2, 3, 4]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform15(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[2, 0], [0, 2]]), 0, (1, 2),
order=order)
assert_array_almost_equal(out, xp.asarray([[1, 3]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform16(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[1, 0.0], [0, 0.5]]), 0,
(3, 8), order=order)
assert_array_almost_equal(out[..., ::2], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform17(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[0.5, 0], [0, 1]]), 0,
(6, 4), order=order)
assert_array_almost_equal(out[::2, ...], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform18(self, order, xp):
data = xp.asarray([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])
out = ndimage.affine_transform(data, xp.asarray([[0.5, 0], [0, 0.5]]), 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform19(self, order, xp):
data = xp.asarray([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=xp.float64)
out = ndimage.affine_transform(data, xp.asarray([[0.5, 0], [0, 0.5]]), 0,
(6, 8), order=order)
out = ndimage.affine_transform(out, xp.asarray([[2.0, 0], [0, 2.0]]), 0,
(3, 4), order=order)
assert_array_almost_equal(out, data)
@xfail_xp_backends("cupy", reason="https://github.com/cupy/cupy/issues/8394")
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform20(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[0], [2]]), 0, (2,),
order=order)
assert_array_almost_equal(out, xp.asarray([1, 3]))
@xfail_xp_backends("cupy", reason="https://github.com/cupy/cupy/issues/8394")
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform21(self, order, xp):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
data = xp.asarray(data)
out = ndimage.affine_transform(data, xp.asarray([[2], [0]]), 0, (2,),
order=order)
assert_array_almost_equal(out, xp.asarray([1, 9]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform22(self, order, xp):
# shift and offset interaction; see issue #1547
data = xp.asarray([4, 1, 3, 2])
out = ndimage.affine_transform(data, xp.asarray([[2]]), [-1], (3,),
order=order)
assert_array_almost_equal(out, xp.asarray([0, 1, 2]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform23(self, order, xp):
# shift and offset interaction; see issue #1547
data = xp.asarray([4, 1, 3, 2])
out = ndimage.affine_transform(data, xp.asarray([[0.5]]), [-1], (8,),
order=order)
assert_array_almost_equal(out[::2], xp.asarray([0, 4, 1, 3]))
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform24(self, order, xp):
# consistency between diagonal and non-diagonal case; see issue #1547
data = xp.asarray([4, 1, 3, 2])
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
'The behavior of affine_transform with a 1-D array .* has changed',
UserWarning)
out1 = ndimage.affine_transform(data, xp.asarray([2]), -1, order=order)
out2 = ndimage.affine_transform(data, xp.asarray([[2]]), -1, order=order)
assert_array_almost_equal(out1, out2)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform25(self, order, xp):
# consistency between diagonal and non-diagonal case; see issue #1547
data = xp.asarray([4, 1, 3, 2])
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
'The behavior of affine_transform with a 1-D array .* '
'has changed', UserWarning)
out1 = ndimage.affine_transform(data, xp.asarray([0.5]), -1, order=order)
out2 = ndimage.affine_transform(data, xp.asarray([[0.5]]), -1, order=order)
assert_array_almost_equal(out1, out2)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform26(self, order, xp):
# test homogeneous coordinates
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
tform_original = xp.eye(2)
offset_original = -xp.ones((2, 1))
tform_h1 = xp.concat((tform_original, offset_original), axis=1) # hstack
tform_h2 = xp.concat((tform_h1, xp.asarray([[0.0, 0, 1]])), axis=0) # vstack
offs = [float(x) for x in xp.reshape(offset_original, (-1,))]
out1 = ndimage.affine_transform(filtered, tform_original,
offs,
order=order, prefilter=False)
out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
prefilter=False)
out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
prefilter=False)
for out in [out1, out2, out3]:
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]]))
@xfail_xp_backends("cupy", reason="does not raise")
def test_affine_transform27(self, xp):
# test valid homogeneous transformation matrix
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
tform_h1 = xp.concat((xp.eye(2), -xp.ones((2, 1))) , axis=1) # vstack
tform_h2 = xp.concat((tform_h1, xp.asarray([[5.0, 2, 1]])), axis=0) # hstack
assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
@skip_xp_backends(np_only=True, reason='byteorder is numpy-specific')
def test_affine_transform_1d_endianness_with_output_parameter(self, xp):
# 1d affine transform given output ndarray or dtype with
# either endianness. see issue #7388
data = xp.ones((2, 2))
for out in [xp.empty_like(data),
xp.empty_like(data).astype(data.dtype.newbyteorder()),
data.dtype, data.dtype.newbyteorder()]:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
'The behavior of affine_transform with a 1-D array '
'.* has changed', UserWarning)
matrix = xp.asarray([1, 1])
returned = ndimage.affine_transform(data, matrix, output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, xp.asarray([[1, 1], [1, 1]]))
@skip_xp_backends(np_only=True, reason='byteorder is numpy-specific')
def test_affine_transform_multi_d_endianness_with_output_parameter(self, xp):
# affine transform given output ndarray or dtype with either endianness
# see issue #4127
# NB: byteorder is numpy-specific
data = np.asarray([1])
for out in [data.dtype, data.dtype.newbyteorder(),
np.empty_like(data),
np.empty_like(data).astype(data.dtype.newbyteorder())]:
returned = ndimage.affine_transform(data, np.asarray([[1]]), output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, np.asarray([1]))
@skip_xp_backends(np_only=True,
reason='`out` of a different size is numpy-specific'
)
def test_affine_transform_output_shape(self, xp):
# don't require output_shape when out of a different size is given
data = xp.arange(8, dtype=xp.float64)
out = xp.ones((16,))
ndimage.affine_transform(data, xp.asarray([[1]]), output=out)
assert_array_almost_equal(out[:8], data)
# mismatched output shape raises an error
with pytest.raises(RuntimeError):
ndimage.affine_transform(
data, [[1]], output=out, output_shape=(12,))
@skip_xp_backends(np_only=True, reason='string `output` is numpy-specific')
def test_affine_transform_with_string_output(self, xp):
data = xp.asarray([1])
out = ndimage.affine_transform(data, xp.asarray([[1]]), output='f')
assert out.dtype is np.dtype('f')
assert_array_almost_equal(out, xp.asarray([1]))
@pytest.mark.parametrize('shift',
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform_shift_via_grid_wrap(self, shift, order, xp):
# For mode 'grid-wrap', integer shifts should match np.roll
x = np.asarray([[0, 1],
[2, 3]])
affine = np.zeros((2, 3))
affine[:2, :2] = np.eye(2)
affine[:, 2] = np.asarray(shift)
expected = np.roll(x, shift, axis=(0, 1))
x = xp.asarray(x)
affine = xp.asarray(affine)
expected = xp.asarray(expected)
assert_array_almost_equal(
ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
expected
)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform_shift_reflect(self, order, xp):
# shift by x.shape results in reflection
x = np.asarray([[0, 1, 2],
[3, 4, 5]])
expected = x[::-1, ::-1].copy() # strides >0 for torch
x = xp.asarray(x)
expected = xp.asarray(expected)
affine = np.zeros([2, 3])
affine[:2, :2] = np.eye(2)
affine[:, 2] = np.asarray(x.shape)
affine = xp.asarray(affine)
assert_array_almost_equal(
ndimage.affine_transform(x, affine, mode='reflect', order=order),
expected,
)
@make_xp_test_case(ndimage.shift)
| TestAffineTransform |
python | huggingface__transformers | src/transformers/models/perceiver/tokenization_perceiver.py | {
"start": 813,
"end": 8035
} | class ____(PreTrainedTokenizer):
"""
Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
bos_token (`str`, *optional*, defaults to `"[BOS]"`):
The BOS token (reserved in the vocab, but not actually used).
eos_token (`str`, *optional*, defaults to `"[EOS]"`):
The end of sequence token (reserved in the vocab, but not actually used).
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The MASK token, useful for masked language modeling.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The CLS token (reserved in the vocab, but not actually used).
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from two sequences.
"""
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
pad_token="[PAD]",
bos_token="[BOS]",
eos_token="[EOS]",
mask_token="[MASK]",
cls_token="[CLS]",
sep_token="[SEP]",
model_max_length=2048,
**kwargs,
) -> None:
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
mask_token = AddedToken(mask_token, lstrip=False, rstrip=False) if isinstance(mask_token, str) else mask_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
self._utf_vocab_size = 2**8 # utf is 8 bits
# Since these tokens are not part of the vocabulary, we manually add them
self._added_tokens_decoder: dict[str, int] = {
0: pad_token,
1: bos_token,
2: eos_token,
3: mask_token,
4: cls_token,
5: sep_token,
}
self._num_special_tokens = len(self._added_tokens_decoder)
super().__init__(
pad_token=pad_token,
bos_token=bos_token,
eos_token=eos_token,
mask_token=mask_token,
cls_token=cls_token,
sep_token=sep_token,
model_max_length=model_max_length,
**kwargs,
)
def get_vocab(self) -> dict[str, int]:
vocab = {}
for i in range(self._utf_vocab_size):
token = chr(i)
vocab[token] = i + self._num_special_tokens
vocab.update(self.added_tokens_encoder)
return vocab
@property
def vocab_size(self):
return self._utf_vocab_size
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# normal case: some special tokens
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks. A sequence has the
following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
else:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]
def _tokenize(self, text: str) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
tokens = [chr(i) for i in text.encode("utf-8")]
return tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if len(token) != 1:
token_id = self.unk_token_id
else:
token_id = ord(token) + self._num_special_tokens
return token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = chr(index - self._num_special_tokens)
return token
# TODO @ArthurZ refactor this as well....
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
bstring = b""
for token in tokens:
if token in self.added_tokens_encoder:
tok_string = str(token).encode("utf-8")
else:
tok_string = bytes([ord(token)])
bstring += tok_string
string = bstring.decode("utf-8", errors="replace")
return string
# PerceiverTokenizer has no vocab file
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
return ()
__all__ = ["PerceiverTokenizer"]
| PerceiverTokenizer |
python | tensorflow__tensorflow | tensorflow/lite/python/lite.py | {
"start": 76773,
"end": 81225
} | class ____(TFLiteConverterBaseV2):
"""Converts the given jax model into TensorFlow Lite model."""
def __init__(self, serving_funcs, inputs):
"""Constructor for TFLiteConverter.
Args:
serving_funcs: A list functions of the serving func of the jax module, the
model params should already be inlined. (e.g., `serving_func =
functools.partial(model, params=params)`)
inputs: Array of input tensor placeholders tuple,s like `jnp.zeros`. For
example, wrapped in an array like "[('input1', input1), ('input2',
input2)]]".
Jax functions are polymorphic, for example:
```python
def add(a, b):
return a + b
```
Will yield different computations if different input signatures are passed
in: Pass `add(10.0, 20.0)` will yield a scalar `add` while pass
`add(np.random((100, 1)), np.random(100, 100))` will yield a broadcasting
add. We will need the input information to do tracing for the converter
to properly convert the model. So it's important to pass in the desired
`input placeholders` with the correct input shape/type.
In the converted tflite model, the function name will be default to "main",
the output names will be the traced outputs. The output ordering shall
match the serving function.
""" # fmt: skip
super(TFLiteJaxConverterV2, self).__init__()
self._serving_funcs = serving_funcs
self._inputs = inputs
@_export_metrics
def convert(self):
"""Converts a Jax serving func based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ImportError:
If cannot import the jit from jax.
ValueError:
No serving function is specified.
Input tensors are not specified.
The truth value of an array with more than one element is ambiguous.
Failed to convert the given Jax function to hlo.
"""
if not _jit:
raise ImportError("Cannot import jit from jax.")
if not self._serving_funcs:
raise ValueError("No serving func is specified.")
if not self._inputs:
raise ValueError("Input tensors are not specified.")
if len(self._inputs) != len(self._serving_funcs):
msg = (
"Input tensor mapping len {} does not match serving func len {}."
.format(len(self._inputs), len(self._serving_funcs))
)
raise ValueError(msg)
if not isinstance(self._inputs, (tuple, list)):
raise ValueError(
"Input tensors should be pass in a tuple list wrapped in an array."
)
# TODO(b/197690428): Support multiple functions.
# Currently only support one serving function.
if len(self._serving_funcs) > 1:
raise ValueError("Currently only support single serving function.")
if not isinstance(self._inputs[0], (tuple, list)):
raise ValueError("The input placeholders are not a dictionary.")
input_names = []
ordered_inputs = []
for input_name, tensor in self._inputs[0]:
input_names.append(input_name)
ordered_inputs.append(tensor)
try:
hlo_proto = (
_jit(self._serving_funcs[0])
.trace(*ordered_inputs)
.lower(lowering_platforms=("cpu",))
.compiler_ir("hlo")
.as_serialized_hlo_module_proto()
)
except Exception: # pylint: disable=broad-except
raise ValueError("Failed to convert the given Jax function to hlo.")
# We need to set the hlo proto, and here we use serialized proto format
# since it's more compact.
converter_kwargs = {
"input_content": hlo_proto,
"input_names": input_names,
"is_proto_format": True,
}
converter_kwargs.update(self._get_base_converter_args())
# Get quantization options and do some checks.
quant_mode = QuantizationMode(
self.optimizations,
self.target_spec,
self.representative_dataset,
None,
experimental_qdq_annotation=self._experimental_strict_qdq,
)
self._validate_inference_input_output_types(quant_mode)
converter_kwargs.update(quant_mode.converter_flags())
result = _convert_jax_hlo(**converter_kwargs)
return self._optimize_tflite_model(
result,
quant_mode,
_build_conversion_flags(**converter_kwargs).debug_options,
quant_io=self.experimental_new_quantizer,
)
@_tf_export("lite.TFLiteConverter", v1=[])
| TFLiteJaxConverterV2 |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py | {
"start": 18053,
"end": 19268
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreMemcachedHook")
def test_assert_valid_hook_call(self, mock_hook):
mock_hook.return_value.get_instance.return_value = cloud_memcache.Instance()
task = CloudMemorystoreMemcachedGetInstanceOperator(
task_id=TEST_TASK_ID,
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_instance.assert_called_once_with(
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudMemorystoreMemcachedGetInstanceOperator |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_changed_validator.py | {
"start": 5172,
"end": 8390
} | class ____:
def test_validate_simple_module(self):
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
# Create a simple test module
test_file = temp_path / "test_module.py"
test_file.write_text('''
def good_function():
"""A well-documented function."""
pass
''')
config = ValidationConfig(
root_path=temp_path,
path_converter=generic_path_converter,
)
# Mock the validation function to always return success
mock_result = ValidatorResult.create("test_module.good_function")
with patch(
"automation.dagster_docs.changed_validator.validate_symbol_docstring",
return_value=mock_result,
):
results = validate_changed_files([test_file], config)
assert len(results) == 1
assert results[0].symbol_info.symbol_path == "test_module.good_function"
assert not results[0].has_errors()
assert not results[0].has_warnings()
def test_file_filtering(self):
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
# Create Python and non-Python files
py_file = temp_path / "test.py"
py_file.write_text('def func(): """Doc"""; pass')
txt_file = temp_path / "test.txt"
txt_file.write_text("Not Python code")
config = ValidationConfig(
root_path=temp_path,
path_converter=generic_path_converter,
)
# Mock the validation function to always return success
mock_result = ValidatorResult.create("test.func")
with patch(
"automation.dagster_docs.changed_validator.validate_symbol_docstring",
return_value=mock_result,
):
results = validate_changed_files([py_file, txt_file], config)
# Only the Python file should be processed
assert len(results) == 1
assert results[0].symbol_info.file_path == py_file
def test_path_converter_filtering(self):
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
test_file = temp_path / "test.py"
test_file.write_text('def func(): """Doc"""; pass')
# Path converter that rejects all files
def reject_all_converter(file_path, root_path):
return None
config = ValidationConfig(
root_path=temp_path,
path_converter=reject_all_converter,
)
# Mock the validation function (though it shouldn't be called)
mock_result = ValidatorResult.create("test.func")
with patch(
"automation.dagster_docs.changed_validator.validate_symbol_docstring",
return_value=mock_result,
):
results = validate_changed_files([test_file], config)
# No files should be processed due to path converter rejection
assert len(results) == 0
| TestValidateChangedFiles |
python | astropy__astropy | astropy/modeling/rotations.py | {
"start": 15004,
"end": 17663
} | class ____(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation and vice-versa.
Parameters
----------
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotation (if float it should be in deg).
"""
n_inputs = 2
n_outputs = 2
_separable = False
angle = Parameter(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Angle of rotation (Quantity or value in deg)",
)
def __init__(self, angle=angle, **kwargs):
super().__init__(angle=angle, **kwargs)
self._inputs = ("x", "y")
self._outputs = ("x", "y")
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : array-like
Input quantities
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotations.
If float, assumed in degrees.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, "unit", None)
y_unit = getattr(y, "unit", None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.ravel(), y.ravel()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit, subok=True), u.Quantity(
y, unit=y_unit, subok=True
)
return x, y
@staticmethod
def _compute_matrix(angle):
if not np.isscalar(angle):
angle = angle[0]
return np.array(
[[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]],
dtype=np.float64,
)
| Rotation2D |
python | django-haystack__django-haystack | haystack/backends/simple_backend.py | {
"start": 3166,
"end": 3832
} | class ____(BaseSearchQuery):
def build_query(self):
if not self.query_filter:
return "*"
return self._build_sub_query(self.query_filter)
def _build_sub_query(self, search_node):
term_list = []
for child in search_node.children:
if isinstance(child, SearchNode):
term_list.append(self._build_sub_query(child))
else:
value = child[1]
if not hasattr(value, "input_type_name"):
value = PythonData(value)
term_list.append(value.prepare(self))
return (" ").join(map(str, term_list))
| SimpleSearchQuery |
python | jina-ai__jina | tests/unit/jaml/test_type_parse.py | {
"start": 1932,
"end": 3672
} | class ____(BaseExecutor):
@requests
def foo(self, **kwargs):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyExec') == MyExec
assert JAML.cls_from_tag('!MyExec') == MyExec
assert JAML.cls_from_tag('BaseExecutor') == BaseExecutor
assert JAML.cls_from_tag('Nonexisting') is None
@pytest.mark.parametrize(
'field_name, override_field',
[
('with', None),
('metas', None),
('requests', None),
('with', {'a': 456, 'b': 'updated-test'}),
(
'metas',
{'name': 'test-name-updated', 'workspace': 'test-work-space-updated'},
),
('requests', {'/foo': 'baz'}),
# assure py_modules only occurs once #3830
(
'metas',
{
'name': 'test-name-updated',
'workspace': 'test-work-space-updated',
'py_modules': 'test_module.py',
},
),
],
)
def test_override_yml_params(field_name, override_field):
original_raw_yaml = {
'jtype': 'SimpleIndexer',
'with': {'a': 123, 'b': 'test'},
'metas': {'name': 'test-name', 'workspace': 'test-work-space'},
'requests': {'/foo': 'bar'},
}
updated_raw_yaml = original_raw_yaml
JAMLCompatible()._override_yml_params(updated_raw_yaml, field_name, override_field)
if override_field:
assert updated_raw_yaml[field_name] == override_field
else:
assert original_raw_yaml == updated_raw_yaml
# assure we don't create py_modules twice
if override_field == 'metas' and 'py_modules' in override_field:
assert 'py_modules' in updated_raw_yaml['metas']
assert 'py_modules' not in updated_raw_yaml
| MyExec |
python | jpadilla__pyjwt | tests/test_algorithms.py | {
"start": 25616,
"end": 30915
} | class ____:
"""
These test vectors were taken from RFC 7520
(https://tools.ietf.org/html/rfc7520)
"""
def test_hmac_verify_should_return_true_for_test_vector(self):
"""
This test verifies that HMAC verification works with a known good
signature and key.
Reference: https://tools.ietf.org/html/rfc7520#section-4.4
"""
signing_input = (
b"eyJhbGciOiJIUzI1NiIsImtpZCI6IjAxOGMwYWU1LTRkOWItNDcxYi1iZmQ2LWVlZ"
b"jMxNGJjNzAzNyJ9.SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywgZ"
b"29pbmcgb3V0IHlvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIG"
b"lmIHlvdSBkb24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmc"
b"gd2hlcmUgeW91IG1pZ2h0IGJlIHN3ZXB0IG9mZiB0by4"
)
signature = base64url_decode(b"s0h6KThzkfBBBkLspW1h84VsJZFTsPPqMDA7g1Md7p0")
algo = HMACAlgorithm(HMACAlgorithm.SHA256)
key = algo.prepare_key(load_hmac_key())
result = algo.verify(signing_input, key, signature)
assert result
@crypto_required
def test_rsa_verify_should_return_true_for_test_vector(self):
"""
This test verifies that RSA PKCS v1.5 verification works with a known
good signature and key.
Reference: https://tools.ietf.org/html/rfc7520#section-4.1
"""
signing_input = (
b"eyJhbGciOiJSUzI1NiIsImtpZCI6ImJpbGJvLmJhZ2dpbnNAaG9iYml0b24uZXhhb"
b"XBsZSJ9.SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywgZ29pbmcgb"
b"3V0IHlvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIGlmIHlvdS"
b"Bkb24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmcgd2hlcmU"
b"geW91IG1pZ2h0IGJlIHN3ZXB0IG9mZiB0by4"
)
signature = base64url_decode(
b"MRjdkly7_-oTPTS3AXP41iQIGKa80A0ZmTuV5MEaHoxnW2e5CZ5NlKtainoFmKZop"
b"dHM1O2U4mwzJdQx996ivp83xuglII7PNDi84wnB-BDkoBwA78185hX-Es4JIwmDLJ"
b"K3lfWRa-XtL0RnltuYv746iYTh_qHRD68BNt1uSNCrUCTJDt5aAE6x8wW1Kt9eRo4"
b"QPocSadnHXFxnt8Is9UzpERV0ePPQdLuW3IS_de3xyIrDaLGdjluPxUAhb6L2aXic"
b"1U12podGU0KLUQSE_oI-ZnmKJ3F4uOZDnd6QZWJushZ41Axf_fcIe8u9ipH84ogor"
b"ee7vjbU5y18kDquDg"
)
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
key = cast(RSAPublicKey, algo.prepare_key(load_rsa_pub_key()))
result = algo.verify(signing_input, key, signature)
assert result
@crypto_required
def test_rsapss_verify_should_return_true_for_test_vector(self):
"""
This test verifies that RSA-PSS verification works with a known good
signature and key.
Reference: https://tools.ietf.org/html/rfc7520#section-4.2
"""
signing_input = (
b"eyJhbGciOiJQUzM4NCIsImtpZCI6ImJpbGJvLmJhZ2dpbnNAaG9iYml0b24uZXhhb"
b"XBsZSJ9.SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywgZ29pbmcgb"
b"3V0IHlvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIGlmIHlvdS"
b"Bkb24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmcgd2hlcmU"
b"geW91IG1pZ2h0IGJlIHN3ZXB0IG9mZiB0by4"
)
signature = base64url_decode(
b"cu22eBqkYDKgIlTpzDXGvaFfz6WGoz7fUDcfT0kkOy42miAh2qyBzk1xEsnk2IpN6"
b"-tPid6VrklHkqsGqDqHCdP6O8TTB5dDDItllVo6_1OLPpcbUrhiUSMxbbXUvdvWXz"
b"g-UD8biiReQFlfz28zGWVsdiNAUf8ZnyPEgVFn442ZdNqiVJRmBqrYRXe8P_ijQ7p"
b"8Vdz0TTrxUeT3lm8d9shnr2lfJT8ImUjvAA2Xez2Mlp8cBE5awDzT0qI0n6uiP1aC"
b"N_2_jLAeQTlqRHtfa64QQSUmFAAjVKPbByi7xho0uTOcbH510a6GYmJUAfmWjwZ6o"
b"D4ifKo8DYM-X72Eaw"
)
algo = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA384)
key = cast(RSAPublicKey, algo.prepare_key(load_rsa_pub_key()))
result = algo.verify(signing_input, key, signature)
assert result
@crypto_required
def test_ec_verify_should_return_true_for_test_vector(self):
"""
This test verifies that ECDSA verification works with a known good
signature and key.
Reference: https://tools.ietf.org/html/rfc7520#section-4.3
"""
signing_input = (
b"eyJhbGciOiJFUzUxMiIsImtpZCI6ImJpbGJvLmJhZ2dpbnNAaG9iYml0b24uZXhhb"
b"XBsZSJ9.SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywgZ29pbmcgb"
b"3V0IHlvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIGlmIHlvdS"
b"Bkb24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmcgd2hlcmU"
b"geW91IG1pZ2h0IGJlIHN3ZXB0IG9mZiB0by4"
)
signature = base64url_decode(
b"AE_R_YZCChjn4791jSQCrdPZCNYqHXCTZH0-JZGYNlaAjP2kqaluUIIUnC9qvbu9P"
b"lon7KRTzoNEuT4Va2cmL1eJAQy3mtPBu_u_sDDyYjnAMDxXPn7XrT0lw-kvAD890j"
b"l8e2puQens_IEKBpHABlsbEPX6sFY8OcGDqoRuBomu9xQ2"
)
algo = ECAlgorithm(ECAlgorithm.SHA512)
key = algo.prepare_key(load_ec_pub_key_p_521())
result = algo.verify(signing_input, key, signature)
assert result
# private key can also be used.
with open(key_path("jwk_ec_key_P-521.json")) as keyfile:
private_key = algo.from_jwk(keyfile.read())
result = algo.verify(signing_input, private_key, signature)
assert result
@crypto_required
| TestAlgorithmsRFC7520 |
python | django__django | tests/template_tests/filter_tests/test_linebreaks.py | {
"start": 216,
"end": 1087
} | class ____(SimpleTestCase):
"""
The contents in "linebreaks" are escaped according to the current
autoescape setting.
"""
@setup({"linebreaks01": "{{ a|linebreaks }} {{ b|linebreaks }}"})
def test_linebreaks01(self):
output = self.engine.render_to_string(
"linebreaks01", {"a": "x&\ny", "b": mark_safe("x&\ny")}
)
self.assertEqual(output, "<p>x&<br>y</p> <p>x&<br>y</p>")
@setup(
{
"linebreaks02": (
"{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}"
"{% endautoescape %}"
)
}
)
def test_linebreaks02(self):
output = self.engine.render_to_string(
"linebreaks02", {"a": "x&\ny", "b": mark_safe("x&\ny")}
)
self.assertEqual(output, "<p>x&<br>y</p> <p>x&<br>y</p>")
| LinebreaksTests |
python | PrefectHQ__prefect | src/integrations/prefect-redis/tests/test_messaging.py | {
"start": 15312,
"end": 21702
} | class ____:
"""Test the Redis messaging settings."""
def test_publisher_settings(self):
"""Test Redis publisher settings."""
settings = RedisMessagingPublisherSettings()
assert settings.batch_size == 5
assert settings.publish_every == timedelta(seconds=10)
assert settings.deduplicate_by is None
def test_consumer_settings(self):
"""Test Redis consumer settings."""
settings = RedisMessagingConsumerSettings()
assert settings.block == timedelta(seconds=1)
assert settings.min_idle_time == timedelta(seconds=5)
assert settings.max_retries == 3
assert settings.trim_every == timedelta(seconds=60)
assert settings.should_process_pending_messages is True
assert settings.starting_message_id == "0"
assert settings.automatically_acknowledge is True
def test_publisher_settings_can_be_overridden(
self, monkeypatch: pytest.MonkeyPatch
):
"""Test that Redis publisher settings can be overridden."""
monkeypatch.setenv("PREFECT_REDIS_MESSAGING_PUBLISHER_BATCH_SIZE", "10")
settings = RedisMessagingPublisherSettings()
assert settings.batch_size == 10
def test_consumer_settings_can_be_overridden(self, monkeypatch: pytest.MonkeyPatch):
"""Test that Redis consumer settings can be overridden."""
monkeypatch.setenv("PREFECT_REDIS_MESSAGING_CONSUMER_BLOCK", "10")
settings = RedisMessagingConsumerSettings()
assert settings.block == timedelta(seconds=10)
async def test_trimming_with_no_delivered_messages(redis: Redis):
"""Test that stream trimming handles the case where no messages have been delivered."""
stream_name = "test-trim-stream"
# Create a stream with some messages
await redis.xadd(stream_name, {"data": "test1"})
await redis.xadd(stream_name, {"data": "test2"})
# Create consumer groups that haven't consumed anything (last-delivered-id = "0-0")
await redis.xgroup_create(stream_name, "group1", id="0", mkstream=True)
await redis.xgroup_create(stream_name, "group2", id="0", mkstream=True)
# This should not raise a ValueError due to min() on empty sequence
await _trim_stream_to_lowest_delivered_id(stream_name)
# Stream should remain unchanged since no messages were delivered
length = await redis.xlen(stream_name)
assert length == 2
async def test_trimming_skips_idle_consumer_groups(
redis: Redis, monkeypatch: pytest.MonkeyPatch
):
"""Test that stream trimming skips consumer groups with all consumers idle beyond threshold."""
stream_name = "test-trim-idle-stream"
# Create a stream with 10 messages
message_ids = []
for i in range(10):
msg_id = await redis.xadd(stream_name, {"data": f"test{i}"})
message_ids.append(msg_id)
# Create two consumer groups
await redis.xgroup_create(stream_name, "active-group", id="0")
await redis.xgroup_create(stream_name, "stuck-group", id="0")
# Active group consumes all messages
messages = await redis.xreadgroup(
groupname="active-group",
consumername="consumer-1",
streams={stream_name: ">"},
count=10,
)
for stream, msgs in messages:
for msg_id, data in msgs:
await redis.xack(stream_name, "active-group", msg_id)
# Stuck group only consumes first 3 messages
messages = await redis.xreadgroup(
groupname="stuck-group",
consumername="consumer-2",
streams={stream_name: ">"},
count=3,
)
stuck_last_id = None
for stream, msgs in messages:
for msg_id, data in msgs:
await redis.xack(stream_name, "stuck-group", msg_id)
stuck_last_id = msg_id
# Wait to make consumers idle (need to wait longer than the threshold)
await asyncio.sleep(1.5)
# Set a very short idle threshold for testing (1 second)
# The setting expects seconds as an integer
monkeypatch.setenv("PREFECT_REDIS_MESSAGING_CONSUMER_TRIM_IDLE_THRESHOLD", "1")
# Create a new active consumer to allow trimming
# This simulates an active consumer that keeps processing
await redis.xreadgroup(
groupname="active-group",
consumername="consumer-3", # New consumer with fresh idle time
streams={stream_name: ">"},
count=1,
)
# Trim the stream - should skip the stuck group but use the active group
await _trim_stream_to_lowest_delivered_id(stream_name)
# Check results
stream_info = await redis.xinfo_stream(stream_name)
first_entry_id = (
stream_info["first-entry"][0] if stream_info["first-entry"] else None
)
# The stream should be trimmed past the stuck group's position
assert first_entry_id is not None
assert first_entry_id > stuck_last_id
# Should have trimmed most messages (keeping only the last one or few)
assert (
stream_info["length"] <= 2
) # Redis might keep 1-2 messages due to trimming behavior
async def test_cleanup_empty_consumer_groups(redis: Redis):
"""Test that empty consumer groups are cleaned up."""
stream_name = "test-cleanup-stream"
# Create a stream with a message
await redis.xadd(stream_name, {"data": "test"})
# Create multiple consumer groups
await redis.xgroup_create(stream_name, "ephemeral-active-group", id="0")
await redis.xgroup_create(stream_name, "ephemeral-empty-group-1", id="0")
await redis.xgroup_create(stream_name, "ephemeral-empty-group-2", id="0")
# Add a consumer to the active group
await redis.xreadgroup(
groupname="ephemeral-active-group",
consumername="consumer-1",
streams={stream_name: ">"},
count=1,
)
# Verify all groups exist
groups_before = await redis.xinfo_groups(stream_name)
assert len(groups_before) == 3
group_names_before = {g["name"] for g in groups_before}
assert group_names_before == {
"ephemeral-active-group",
"ephemeral-empty-group-1",
"ephemeral-empty-group-2",
}
# Run cleanup
await _cleanup_empty_consumer_groups(stream_name)
# Verify only the active group remains
groups_after = await redis.xinfo_groups(stream_name)
assert len(groups_after) == 1
assert groups_after[0]["name"] == "ephemeral-active-group"
| TestRedisMessagingSettings |
python | FactoryBoy__factory_boy | tests/test_alchemy.py | {
"start": 8739,
"end": 9921
} | class ____(TransactionTestCase):
def test_create_get_session_from_sqlalchemy_session_factory(self):
class SessionGetterFactory(SQLAlchemyModelFactory):
class Meta:
model = models.StandardModel
sqlalchemy_session_factory = lambda: models.session
id = factory.Sequence(lambda n: n)
SessionGetterFactory.create()
self.assertEqual(SessionGetterFactory._meta.sqlalchemy_session, models.session)
# Reuse the session obtained from sqlalchemy_session_factory.
SessionGetterFactory.create()
def test_create_raise_exception_sqlalchemy_session_factory_not_callable(self):
message = "^Provide either a sqlalchemy_session or a sqlalchemy_session_factory, not both$"
with self.assertRaisesRegex(RuntimeError, message):
class SessionAndGetterFactory(SQLAlchemyModelFactory):
class Meta:
model = models.StandardModel
sqlalchemy_session = models.session
sqlalchemy_session_factory = lambda: models.session
id = factory.Sequence(lambda n: n)
| SQLAlchemySessionFactoryTestCase |
python | ipython__ipython | tests/test_interactivshell.py | {
"start": 2449,
"end": 3311
} | class ____(unittest.TestCase):
def test_adjust_completion_text_based_on_context(self):
# Adjusted case
self.assertEqual(
_adjust_completion_text_based_on_context("arg1=", "func1(a=)", 7), "arg1"
)
# Untouched cases
self.assertEqual(
_adjust_completion_text_based_on_context("arg1=", "func1(a)", 7), "arg1="
)
self.assertEqual(
_adjust_completion_text_based_on_context("arg1=", "func1(a", 7), "arg1="
)
self.assertEqual(
_adjust_completion_text_based_on_context("%magic", "func1(a=)", 7), "%magic"
)
self.assertEqual(
_adjust_completion_text_based_on_context("func2", "func1(a=)", 7), "func2"
)
# Decorator for interaction loop tests -----------------------------------------
| TestContextAwareCompletion |
python | openai__gym | gym/envs/toy_text/frozen_lake.py | {
"start": 1916,
"end": 13715
} | class ____(Env):
"""
Frozen lake involves crossing a frozen lake from Start(S) to Goal(G) without falling into any Holes(H)
by walking over the Frozen(F) lake.
The agent may not always move in the intended direction due to the slippery nature of the frozen lake.
### Action Space
The agent takes a 1-element vector for actions.
The action space is `(dir)`, where `dir` decides direction to move in which can be:
- 0: LEFT
- 1: DOWN
- 2: RIGHT
- 3: UP
### Observation Space
The observation is a value representing the agent's current position as
current_row * nrows + current_col (where both the row and col start at 0).
For example, the goal position in the 4x4 map can be calculated as follows: 3 * 4 + 3 = 15.
The number of possible observations is dependent on the size of the map.
For example, the 4x4 map has 16 possible observations.
### Rewards
Reward schedule:
- Reach goal(G): +1
- Reach hole(H): 0
- Reach frozen(F): 0
### Arguments
```
gym.make('FrozenLake-v1', desc=None, map_name="4x4", is_slippery=True)
```
`desc`: Used to specify custom map for frozen lake. For example,
desc=["SFFF", "FHFH", "FFFH", "HFFG"].
A random generated map can be specified by calling the function `generate_random_map`. For example,
```
from gym.envs.toy_text.frozen_lake import generate_random_map
gym.make('FrozenLake-v1', desc=generate_random_map(size=8))
```
`map_name`: ID to use any of the preloaded maps.
"4x4":[
"SFFF",
"FHFH",
"FFFH",
"HFFG"
]
"8x8": [
"SFFFFFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG",
]
`is_slippery`: True/False. If True will move in intended direction with
probability of 1/3 else will move in either perpendicular direction with
equal probability of 1/3 in both directions.
For example, if action is left and is_slippery is True, then:
- P(move left)=1/3
- P(move up)=1/3
- P(move down)=1/3
### Version History
* v1: Bug fixes to rewards
* v0: Initial versions release (1.0.0)
"""
metadata = {
"render_modes": ["human", "ansi", "rgb_array"],
"render_fps": 4,
}
def __init__(
self,
render_mode: Optional[str] = None,
desc=None,
map_name="4x4",
is_slippery=True,
):
if desc is None and map_name is None:
desc = generate_random_map()
elif desc is None:
desc = MAPS[map_name]
self.desc = desc = np.asarray(desc, dtype="c")
self.nrow, self.ncol = nrow, ncol = desc.shape
self.reward_range = (0, 1)
nA = 4
nS = nrow * ncol
self.initial_state_distrib = np.array(desc == b"S").astype("float64").ravel()
self.initial_state_distrib /= self.initial_state_distrib.sum()
self.P = {s: {a: [] for a in range(nA)} for s in range(nS)}
def to_s(row, col):
return row * ncol + col
def inc(row, col, a):
if a == LEFT:
col = max(col - 1, 0)
elif a == DOWN:
row = min(row + 1, nrow - 1)
elif a == RIGHT:
col = min(col + 1, ncol - 1)
elif a == UP:
row = max(row - 1, 0)
return (row, col)
def update_probability_matrix(row, col, action):
newrow, newcol = inc(row, col, action)
newstate = to_s(newrow, newcol)
newletter = desc[newrow, newcol]
terminated = bytes(newletter) in b"GH"
reward = float(newletter == b"G")
return newstate, reward, terminated
for row in range(nrow):
for col in range(ncol):
s = to_s(row, col)
for a in range(4):
li = self.P[s][a]
letter = desc[row, col]
if letter in b"GH":
li.append((1.0, s, 0, True))
else:
if is_slippery:
for b in [(a - 1) % 4, a, (a + 1) % 4]:
li.append(
(1.0 / 3.0, *update_probability_matrix(row, col, b))
)
else:
li.append((1.0, *update_probability_matrix(row, col, a)))
self.observation_space = spaces.Discrete(nS)
self.action_space = spaces.Discrete(nA)
self.render_mode = render_mode
# pygame utils
self.window_size = (min(64 * ncol, 512), min(64 * nrow, 512))
self.cell_size = (
self.window_size[0] // self.ncol,
self.window_size[1] // self.nrow,
)
self.window_surface = None
self.clock = None
self.hole_img = None
self.cracked_hole_img = None
self.ice_img = None
self.elf_images = None
self.goal_img = None
self.start_img = None
def step(self, a):
transitions = self.P[self.s][a]
i = categorical_sample([t[0] for t in transitions], self.np_random)
p, s, r, t = transitions[i]
self.s = s
self.lastaction = a
if self.render_mode == "human":
self.render()
return (int(s), r, t, False, {"prob": p})
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
):
super().reset(seed=seed)
self.s = categorical_sample(self.initial_state_distrib, self.np_random)
self.lastaction = None
if self.render_mode == "human":
self.render()
return int(self.s), {"prob": 1}
def render(self):
if self.render_mode is None:
logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
)
elif self.render_mode == "ansi":
return self._render_text()
else: # self.render_mode in {"human", "rgb_array"}:
return self._render_gui(self.render_mode)
def _render_gui(self, mode):
try:
import pygame
except ImportError:
raise DependencyNotInstalled(
"pygame is not installed, run `pip install gym[toy_text]`"
)
if self.window_surface is None:
pygame.init()
if mode == "human":
pygame.display.init()
pygame.display.set_caption("Frozen Lake")
self.window_surface = pygame.display.set_mode(self.window_size)
elif mode == "rgb_array":
self.window_surface = pygame.Surface(self.window_size)
assert (
self.window_surface is not None
), "Something went wrong with pygame. This should never happen."
if self.clock is None:
self.clock = pygame.time.Clock()
if self.hole_img is None:
file_name = path.join(path.dirname(__file__), "img/hole.png")
self.hole_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.cracked_hole_img is None:
file_name = path.join(path.dirname(__file__), "img/cracked_hole.png")
self.cracked_hole_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.ice_img is None:
file_name = path.join(path.dirname(__file__), "img/ice.png")
self.ice_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.goal_img is None:
file_name = path.join(path.dirname(__file__), "img/goal.png")
self.goal_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.start_img is None:
file_name = path.join(path.dirname(__file__), "img/stool.png")
self.start_img = pygame.transform.scale(
pygame.image.load(file_name), self.cell_size
)
if self.elf_images is None:
elfs = [
path.join(path.dirname(__file__), "img/elf_left.png"),
path.join(path.dirname(__file__), "img/elf_down.png"),
path.join(path.dirname(__file__), "img/elf_right.png"),
path.join(path.dirname(__file__), "img/elf_up.png"),
]
self.elf_images = [
pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
for f_name in elfs
]
desc = self.desc.tolist()
assert isinstance(desc, list), f"desc should be a list or an array, got {desc}"
for y in range(self.nrow):
for x in range(self.ncol):
pos = (x * self.cell_size[0], y * self.cell_size[1])
rect = (*pos, *self.cell_size)
self.window_surface.blit(self.ice_img, pos)
if desc[y][x] == b"H":
self.window_surface.blit(self.hole_img, pos)
elif desc[y][x] == b"G":
self.window_surface.blit(self.goal_img, pos)
elif desc[y][x] == b"S":
self.window_surface.blit(self.start_img, pos)
pygame.draw.rect(self.window_surface, (180, 200, 230), rect, 1)
# paint the elf
bot_row, bot_col = self.s // self.ncol, self.s % self.ncol
cell_rect = (bot_col * self.cell_size[0], bot_row * self.cell_size[1])
last_action = self.lastaction if self.lastaction is not None else 1
elf_img = self.elf_images[last_action]
if desc[bot_row][bot_col] == b"H":
self.window_surface.blit(self.cracked_hole_img, cell_rect)
else:
self.window_surface.blit(elf_img, cell_rect)
if mode == "human":
pygame.event.pump()
pygame.display.update()
self.clock.tick(self.metadata["render_fps"])
elif mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.window_surface)), axes=(1, 0, 2)
)
@staticmethod
def _center_small_rect(big_rect, small_dims):
offset_w = (big_rect[2] - small_dims[0]) / 2
offset_h = (big_rect[3] - small_dims[1]) / 2
return (
big_rect[0] + offset_w,
big_rect[1] + offset_h,
)
def _render_text(self):
desc = self.desc.tolist()
outfile = StringIO()
row, col = self.s // self.ncol, self.s % self.ncol
desc = [[c.decode("utf-8") for c in line] for line in desc]
desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True)
if self.lastaction is not None:
outfile.write(f" ({['Left', 'Down', 'Right', 'Up'][self.lastaction]})\n")
else:
outfile.write("\n")
outfile.write("\n".join("".join(line) for line in desc) + "\n")
with closing(outfile):
return outfile.getvalue()
def close(self):
if self.window_surface is not None:
import pygame
pygame.display.quit()
pygame.quit()
# Elf and stool from https://franuka.itch.io/rpg-snow-tileset
# All other assets by Mel Tillery http://www.cyaneus.com/
| FrozenLakeEnv |
python | django__django | tests/model_meta/tests.py | {
"start": 9246,
"end": 10194
} | class ____(SimpleTestCase):
def test_plain_model_none(self):
self.assertIsNone(Relation._meta.swapped)
def test_unset(self):
self.assertIsNone(Swappable._meta.swapped)
def test_set_and_unset(self):
with override_settings(MODEL_META_TESTS_SWAPPED="model_meta.Relation"):
self.assertEqual(Swappable._meta.swapped, "model_meta.Relation")
self.assertIsNone(Swappable._meta.swapped)
def test_setting_none(self):
with override_settings(MODEL_META_TESTS_SWAPPED=None):
self.assertIsNone(Swappable._meta.swapped)
def test_setting_non_label(self):
with override_settings(MODEL_META_TESTS_SWAPPED="not-a-label"):
self.assertEqual(Swappable._meta.swapped, "not-a-label")
def test_setting_self(self):
with override_settings(MODEL_META_TESTS_SWAPPED="model_meta.swappable"):
self.assertIsNone(Swappable._meta.swapped)
| SwappedTests |
python | spack__spack | lib/spack/spack/version/common.py | {
"start": 901,
"end": 1007
} | class ____(VersionError):
"""Raised for errors looking up git commits as versions."""
| VersionLookupError |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 29736,
"end": 30588
} | class ____(TestCase):
def test_basic(self):
for iterable, expected in [
([], []),
([1], [[1]]),
([1, 2], [[1], [1, 2], [2]]),
(iter([1, 2]), [[1], [1, 2], [2]]),
([2, 1], [[2], [2, 1], [1]]),
(
'ABCD',
[
['A'],
['A', 'B'],
['A', 'B', 'C'],
['A', 'B', 'C', 'D'],
['B'],
['B', 'C'],
['B', 'C', 'D'],
['C'],
['C', 'D'],
['D'],
],
),
]:
with self.subTest(expected=expected):
actual = list(mi.subslices(iterable))
self.assertEqual(actual, expected)
| SubslicesTests |
python | ray-project__ray | python/ray/_private/authentication/grpc_authentication_client_interceptor.py | {
"start": 1063,
"end": 2892
} | class ____(
grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor,
):
"""Synchronous gRPC client interceptor that adds authentication metadata."""
def _intercept_call_details(self, client_call_details):
"""Helper method to add authentication metadata to client call details."""
metadata = list(client_call_details.metadata or [])
metadata.extend(_get_authentication_metadata_tuple())
return _ClientCallDetails(
method=client_call_details.method,
timeout=client_call_details.timeout,
metadata=metadata,
credentials=client_call_details.credentials,
wait_for_ready=getattr(client_call_details, "wait_for_ready", None),
compression=getattr(client_call_details, "compression", None),
)
def intercept_unary_unary(self, continuation, client_call_details, request):
new_details = self._intercept_call_details(client_call_details)
return continuation(new_details, request)
def intercept_unary_stream(self, continuation, client_call_details, request):
new_details = self._intercept_call_details(client_call_details)
return continuation(new_details, request)
def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
new_details = self._intercept_call_details(client_call_details)
return continuation(new_details, request_iterator)
def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
new_details = self._intercept_call_details(client_call_details)
return continuation(new_details, request_iterator)
| SyncAuthenticationMetadataClientInterceptor |
python | sympy__sympy | sympy/diffgeom/diffgeom.py | {
"start": 33855,
"end": 36314
} | class ____(Expr):
r"""Commutator of two vector fields.
Explanation
===========
The commutator of two vector fields `v_1` and `v_2` is defined as the
vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal
to `v_1(v_2(f)) - v_2(v_1(f))`.
Examples
========
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import Commutator
>>> from sympy import simplify
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_r = R2_p.base_vector(0)
>>> c_xy = Commutator(e_x, e_y)
>>> c_xr = Commutator(e_x, e_r)
>>> c_xy
0
Unfortunately, the current code is not able to compute everything:
>>> c_xr
Commutator(e_x, e_rho)
>>> simplify(c_xr(fy**2))
-2*cos(theta)*y**2/(x**2 + y**2)
"""
def __new__(cls, v1, v2):
if (covariant_order(v1) or contravariant_order(v1) != 1
or covariant_order(v2) or contravariant_order(v2) != 1):
raise ValueError(
'Only commutators of vector fields are supported.')
if v1 == v2:
return S.Zero
coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)])
if len(coord_sys) == 1:
# Only one coordinate systems is used, hence it is easy enough to
# actually evaluate the commutator.
if all(isinstance(v, BaseVectorField) for v in (v1, v2)):
return S.Zero
bases_1, bases_2 = [list(v.atoms(BaseVectorField))
for v in (v1, v2)]
coeffs_1 = [v1.expand().coeff(b) for b in bases_1]
coeffs_2 = [v2.expand().coeff(b) for b in bases_2]
res = 0
for c1, b1 in zip(coeffs_1, bases_1):
for c2, b2 in zip(coeffs_2, bases_2):
res += c1*b1(c2)*b2 - c2*b2(c1)*b1
return res
else:
obj = super().__new__(cls, v1, v2)
obj._v1 = v1 # deprecated assignment
obj._v2 = v2 # deprecated assignment
return obj
@property
def v1(self):
return self.args[0]
@property
def v2(self):
return self.args[1]
def __call__(self, scalar_field):
"""Apply on a scalar field.
If the argument is not a scalar field an error is raised.
"""
return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field))
| Commutator |
python | walkccc__LeetCode | solutions/1975. Maximum Matrix Sum/1975.py | {
"start": 0,
"end": 392
} | class ____:
def maxMatrixSum(self, matrix: list[list[int]]) -> int:
absSum = 0
minAbs = math.inf
# 0 := even number of negatives
# 1 := odd number of negatives
oddNeg = 0
for row in matrix:
for num in row:
absSum += abs(num)
minAbs = min(minAbs, abs(num))
if num < 0:
oddNeg ^= 1
return absSum - oddNeg * minAbs * 2
| Solution |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 138067,
"end": 139052
} | class ____(test_util.TensorFlowTestCase):
def testToArrayEagerMode(self):
with context.eager_mode():
a = np.array(constant_op.constant(32), dtype=np.float32)
b = np.array(constant_op.constant(32, dtype=dtypes.int64))
self.assertEqual(a.dtype, np.dtype(np.float32))
self.assertEqual(b.dtype, np.dtype(np.int64))
def testToArrayFunctionMode(self):
@def_function.function
def f():
# Raises during trace compilation.
return np.array(constant_op.constant(32), dtype=np.int32)
@def_function.function
def g():
# Raises during trace compilation.
return np.array(constant_op.constant(32))
with self.assertRaisesRegex(NotImplementedError,
"Cannot convert a symbolic tf.Tensor"):
f()
with self.assertRaisesRegex(NotImplementedError,
"Cannot convert a symbolic tf.Tensor"):
g()
if __name__ == "__main__":
googletest.main()
| TensorTest |
python | cookiecutter__cookiecutter | cookiecutter/extensions.py | {
"start": 883,
"end": 1507
} | class ____(Extension):
"""Jinja2 extension to create a random string."""
def __init__(self, environment: Environment) -> None:
"""Jinja2 Extension Constructor."""
super().__init__(environment)
def random_ascii_string(length: int, punctuation: bool = False) -> str:
if punctuation:
corpus = f'{string.ascii_letters}{string.punctuation}'
else:
corpus = string.ascii_letters
return "".join(choice(corpus) for _ in range(length))
environment.globals.update(random_ascii_string=random_ascii_string)
| RandomStringExtension |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/matrix_solve_op_test.py | {
"start": 6132,
"end": 8896
} | class ____(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
| MatrixSolveBenchmark |
python | pytorch__pytorch | test/jit/test_slice.py | {
"start": 439,
"end": 5238
} | class ____(JitTestCase):
def test_slice_kwarg(self):
def slice_kwarg(x: List[int]):
return x[slice(1, stop=2)]
with self.assertRaisesRegex(
RuntimeError, "Slice does not accept any keyword arguments"
):
torch.jit.script(slice_kwarg)
def test_slice_three_nones(self):
def three_nones(x: List[int]):
return x[slice(None, None, None)]
self.checkScript(three_nones, (range(10),))
def test_slice_two_nones(self):
def two_nones(x: List[int]):
return x[slice(None, None)]
self.checkScript(two_nones, (range(10),))
def test_slice_one_none(self):
def one_none(x: List[int]):
return x[slice(None)]
self.checkScript(one_none, (range(10),))
def test_slice_stop_only(self):
def fn(x: List[int]):
return x[slice(5)]
self.checkScript(fn, (range(10),))
def test_slice_stop_only_with_nones(self):
def fn(x: List[int]):
return x[slice(None, 5, None)]
self.checkScript(fn, (range(10),))
def test_slice_start_stop(self):
def fn(x: List[int]):
return x[slice(1, 5)]
self.checkScript(fn, (range(10),))
def test_slice_start_stop_with_none(self):
def fn(x: List[int]):
return x[slice(1, 5, None)]
self.checkScript(fn, (range(10),))
def test_slice_start_stop_step(self):
def fn(x: List[int]):
return x[slice(0, 6, 2)]
self.checkScript(fn, (range(10),))
def test_slice_string(self):
def fn(x: str):
return x[slice(None, 3, 1)]
self.checkScript(fn, ("foo_bar",))
def test_slice_tensor(self):
def fn(x: torch.Tensor):
return x[slice(None, 3, 1)]
self.checkScript(fn, (torch.ones(10),))
def test_slice_tensor_multidim(self):
def fn(x: torch.Tensor):
return x[slice(None, 3, 1), 0]
self.checkScript(fn, (torch.ones((10, 10)),))
def test_slice_tensor_multidim_with_dots(self):
def fn(x: torch.Tensor):
return x[slice(None, 3, 1), ...]
self.checkScript(fn, (torch.ones((10, 10)),))
def test_slice_as_variable(self):
def fn(x: List[int]):
a = slice(1)
return x[a]
self.checkScript(fn, (range(10),))
def test_slice_stop_clipped(self):
def fn(x: List[int]):
return x[slice(1000)]
self.checkScript(fn, (range(10),))
def test_slice_dynamic_index(self):
def t(x):
slice1 = x[0:1]
zero = 0
one = zero + 1
slice2 = x[zero:one]
return slice1 + slice2
self.checkScript(t, (torch.zeros(3, 2, 3),))
def test_tuple_slicing(self):
def tuple_slice(a):
if bool(a):
b = (1, 2, 3, 4)
else:
b = (4, 3, 2, 1)
c = b[-4:4]
e = c[1:-1]
return e
self.checkScript(tuple_slice, (torch.tensor([1]),), optimize=True)
scripted_fn = torch.jit.script(tuple_slice)
self.assertEqual(scripted_fn(torch.tensor(1)), (2, 3))
tuple_graph = scripted_fn.graph
slices = tuple_graph.findAllNodes("prim::TupleConstruct")
num_outputs = {len(x.output().type().elements()) for x in slices}
# there should be only one tupleSlice with length of 2
self.assertTrue(num_outputs == {2})
self.run_pass("lower_all_tuples", tuple_graph)
self.assertTrue("Tuple" not in str(tuple_graph))
def test_module_list_slicing(self):
class Bar(torch.nn.Module):
def __init__(self, identifier: str):
super().__init__()
self.identifier = identifier
def forward(self):
return 0
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
module_list = [Bar("A"), Bar("B"), Bar("C"), Bar("D"), Bar("E")]
self.test = torch.nn.ModuleList(module_list)
def forward(self):
return self.test[::-2], self.test[1:4:]
scripted_foo = torch.jit.script(Foo())
result1, result2 = scripted_foo()
self.assertEqual(len(result1), 3)
self.assertEqual(result1[0].identifier, "E")
self.assertEqual(result1[1].identifier, "C")
self.assertEqual(result1[2].identifier, "A")
self.assertEqual(len(result2), 3)
self.assertEqual(result2[0].identifier, "B")
self.assertEqual(result2[1].identifier, "C")
self.assertEqual(result2[2].identifier, "D")
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestSlice |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_access_request_details.py | {
"start": 232,
"end": 2178
} | class ____(APITestCase):
def test_only_returns_valid_requests(self) -> None:
owner_user = self.create_user("owner@example.com")
organization = self.create_organization(owner=owner_user)
team = self.create_team(organization=organization)
self.create_team_membership(team=team, user=owner_user)
joined_team_member = self.create_member(
organization=organization,
role="member",
user=self.create_user("joined-team@example.com"),
)
invite_email_member = self.create_member(
organization=organization,
role="member",
email="invite-email@example.com",
)
not_joined_team_member = self.create_member(
organization=organization,
role="member",
user=self.create_user("not-joined-team@example.com"),
)
OrganizationAccessRequest.objects.create(member=joined_team_member, team=team)
OrganizationAccessRequest.objects.create(member=invite_email_member, team=team)
not_joined_request = OrganizationAccessRequest.objects.create(
member=not_joined_team_member, team=team
)
self.create_team_membership(team=team, member=joined_team_member)
self.login_as(owner_user)
resp = self.client.get(
reverse("sentry-api-0-organization-access-requests", args=[organization.slug])
)
# We omit the request that has already been fulfilled by a user joining the team some other way.
# We also omit email invites to teams (since those cannot be approved until the user creates a Sentry account)
assert len(resp.data) == 1
assert resp.data[0]["id"] == str(not_joined_request.id)
assert resp.data[0]["member"]["id"] == str(not_joined_request.member.id)
assert resp.data[0]["team"]["id"] == str(not_joined_request.team.id)
| GetOrganizationAccessRequestTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_emr.py | {
"start": 5548,
"end": 9230
} | class ____(BaseAwsLinksTestCase):
link_class = EmrServerlessDashboardLink
def test_extra_link(self, mocked_emr_serverless_hook, mock_supervisor_comms):
mocked_client = mocked_emr_serverless_hook.return_value.conn
mocked_client.get_dashboard_for_job_run.return_value = {"url": "https://example.com/?authToken=1234"}
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"conn_id": "aws-test",
"application_id": "app-id",
"job_run_id": "job-run-id",
},
)
self.assert_extra_link_url(
expected_url="https://example.com/?authToken=1234",
conn_id="aws-test",
application_id="app-id",
job_run_id="job-run-id",
)
mocked_emr_serverless_hook.assert_called_with(
aws_conn_id="aws-test", config={"retries": {"total_max_attempts": 1}}
)
mocked_client.get_dashboard_for_job_run.assert_called_with(
applicationId="app-id",
jobRunId="job-run-id",
)
@pytest.mark.parametrize(
("dashboard_info", "expected_uri"),
[
pytest.param(
{"url": "https://example.com/?authToken=first-unique-value"},
"https://example.com/?authToken=first-unique-value",
id="first-call",
),
pytest.param(
{"url": "https://example.com/?authToken=second-unique-value"},
"https://example.com/?authToken=second-unique-value",
id="second-call",
),
],
)
def test_get_serverless_dashboard_url_with_client(mocked_emr_serverless_hook, dashboard_info, expected_uri):
mocked_client = mocked_emr_serverless_hook.return_value.conn
mocked_client.get_dashboard_for_job_run.return_value = dashboard_info
url = get_serverless_dashboard_url(
emr_serverless_client=mocked_client, application_id="anything", job_run_id="anything"
)
assert url
assert url.geturl() == expected_uri
mocked_emr_serverless_hook.assert_not_called()
mocked_client.get_dashboard_for_job_run.assert_called_with(
applicationId="anything",
jobRunId="anything",
)
def test_get_serverless_dashboard_url_with_conn_id(mocked_emr_serverless_hook):
mocked_client = mocked_emr_serverless_hook.return_value.conn
mocked_client.get_dashboard_for_job_run.return_value = {
"url": "https://example.com/?authToken=some-unique-value"
}
url = get_serverless_dashboard_url(
aws_conn_id="aws-test", application_id="anything", job_run_id="anything"
)
assert url
assert url.geturl() == "https://example.com/?authToken=some-unique-value"
mocked_emr_serverless_hook.assert_called_with(
aws_conn_id="aws-test", config={"retries": {"total_max_attempts": 1}}
)
mocked_client.get_dashboard_for_job_run.assert_called_with(
applicationId="anything",
jobRunId="anything",
)
def test_get_serverless_dashboard_url_parameters():
with pytest.raises(
AirflowException, match="Requires either an AWS connection ID or an EMR Serverless Client"
):
get_serverless_dashboard_url(application_id="anything", job_run_id="anything")
with pytest.raises(
AirflowException, match="Requires either an AWS connection ID or an EMR Serverless Client"
):
get_serverless_dashboard_url(
aws_conn_id="a", emr_serverless_client="b", application_id="anything", job_run_id="anything"
)
| TestEmrServerlessDashboardLink |
python | tensorflow__tensorflow | third_party/xla/xla/tools/buffer_debug_log/checksum_mismatch_report.py | {
"start": 2717,
"end": 8232
} | class ____:
"""A report of checksum mismatches for a thunk."""
thunk_metadata: dict[ThunkId, ThunkMetadata]
# Thunks for which different executions produced different results. The value
# is a input checksums => output checksum sets dict containing the info about
# inconsistent outptus, and the checksums of inputs that caused them.
mismatches: dict[
ThunkId, dict[BufferChecksums, dict[BufferIdx, set[Checksum]]]
]
@classmethod
def from_protos(
cls,
log_protos: dict[
ModuleExecutionId, buffer_debug_log_pb2.BufferDebugLogProto
],
metadata_proto: thunk_pb2.ThunkMetadataListProto,
) -> Self:
"""Creates a ChecksumMismatchReport from protobufs.
Args:
log_protos: A dict of BufferDebugLogProto keyed by module execution ID.
metadata_proto: A ThunkMetadataListProto.
Preconditions:
- All log protos must refer to the same HLO module.
- metadata proto must describe the same HLO module as the log protos or be
an empty proto.
"""
metadata = _parse_metadata(metadata_proto)
executions = itertools.chain.from_iterable(
_parse_log(module_execution_id, log_proto)
for module_execution_id, log_proto in log_protos.items()
)
mismatches = _find_inconsistent_thunks(executions)
return cls(metadata, mismatches)
K = TypeVar("K")
T = TypeVar("T")
def group_by(
values: Iterable[T], key_getter: Callable[[T], K]
) -> dict[K, list[T]]:
"""Groups a sequence by a key function."""
result = collections.defaultdict(list)
for item in values:
result[key_getter(item)].append(item)
return result
def _parse_metadata(
metadata_proto: thunk_pb2.ThunkMetadataListProto,
) -> dict[ThunkId, ThunkMetadata]:
"""Parses a ThunkMetadataListProto into a dict of ThunkMetadata."""
metadata_by_thunk_id: dict[ThunkId, ThunkMetadata] = {}
for metadata in metadata_proto.thunk_metadata:
thunk_id = ThunkId(metadata.thunk_info.thunk_id)
metadata_by_thunk_id[thunk_id] = ThunkMetadata(
thunk_id=thunk_id,
thunk_kind=metadata.thunk_kind,
profile_annotation=metadata.thunk_info.profile_annotation,
)
return metadata_by_thunk_id
def _parse_log(
module_execution: int,
log_proto: buffer_debug_log_pb2.BufferDebugLogProto,
) -> list[ThunkExecution]:
"""Parses a BufferDebugLogProto and ThunkMetadataListProto into a list of ThunkExecutions."""
entries_by_execution = group_by(
log_proto.entries, lambda entry: (entry.thunk_id, entry.execution_id)
)
executions = [
ThunkExecution(
module_execution_id=module_execution,
thunk_execution_id=execution_id,
thunk_id=thunk_id,
input_checksums=BufferChecksums({
entry.buffer_idx: entry.checksum
for entry in entries
if entry.is_input_buffer
}),
output_checksums=BufferChecksums({
entry.buffer_idx: entry.checksum
for entry in entries
if not entry.is_input_buffer
}),
)
for (thunk_id, execution_id), entries in entries_by_execution.items()
]
return executions
def _find_inconsistent_output_checksums(
executions: list[ThunkExecution],
) -> dict[BufferIdx, set[Checksum]]:
"""Finds mismatches in output checksums for a list of identical executions.
Args:
executions: A list of executions of the same thunk on the same input
arguments.
Returns:
A dict of buffers whose contents were not consistent across executions with
the same inputs, based on the checksum value. The value is a set of
checksums observed for that buffer.
"""
checksums_by_buffer_idx: dict[BufferIdx, set[Checksum]] = (
collections.defaultdict(set)
)
for execution in executions:
for buffer_idx, checksum in execution.output_checksums.checksums.items():
checksums_by_buffer_idx[buffer_idx].add(checksum)
return {
buffer_idx: checksums
for buffer_idx, checksums in checksums_by_buffer_idx.items()
if len(checksums) > 1
}
def _find_inconsistent_thunks(
executions: Iterable[ThunkExecution],
) -> dict[ThunkId, dict[BufferChecksums, dict[BufferIdx, set[Checksum]]]]:
"""Finds thunks with inconsistent output checksums across identical executions.
Args:
executions: A arbitrary list of thunk executions.
Returns:
A dict of thunks whose outputs were inconsistent across identical
executions.
The value is a dict keyed by the set of input checksums, with values
identifying the output buffers with inconsistent checksums, along with the
set of observed checksums for each.
"""
executions_by_thunk_id: dict[ThunkId, list[ThunkExecution]] = group_by(
executions,
lambda e: e.thunk_id,
)
mismatches: dict[
ThunkId, dict[BufferChecksums, dict[BufferIdx, set[Checksum]]]
] = {}
for thunk_id, executions in executions_by_thunk_id.items():
executions_by_inputs: dict[BufferChecksums, list[ThunkExecution]] = (
group_by(executions, lambda e: e.input_checksums)
)
mismatches_by_inputs: dict[
BufferChecksums, dict[BufferIdx, set[Checksum]]
] = {}
for input_checksums, executions in executions_by_inputs.items():
m = _find_inconsistent_output_checksums(executions)
if m:
mismatches_by_inputs[input_checksums] = m
if mismatches_by_inputs:
mismatches[thunk_id] = mismatches_by_inputs
return mismatches
| ChecksumMismatchReport |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 7283,
"end": 7555
} | class ____(models.Model):
def slugify_function(self, content):
return content.upper()
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from="title")
class Meta:
app_label = "django_extensions"
| CustomFuncSluggedTestModel |
python | google__pytype | pytype/load_pytd.py | {
"start": 3628,
"end": 3955
} | class ____(Exception):
"""If we can't resolve a module referenced by the one we're trying to load."""
def __init__(self, module_error, src=None):
referenced = f", referenced from {src!r}" if src else ""
super().__init__(module_error + referenced)
def __str__(self):
return str(self.args[0])
| BadDependencyError |
python | huggingface__transformers | src/transformers/models/modernbert_decoder/modeling_modernbert_decoder.py | {
"start": 24324,
"end": 29060
} | class ____(ModernBertDecoderPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"decoder.weight": "model.embeddings.tok_embeddings.weight"}
def __init__(self, config: ModernBertDecoderConfig):
super().__init__(config)
self.config = config
self.model = ModernBertDecoderModel(config)
self.lm_head = ModernBertDecoderPredictionHead(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.decoder
def set_output_embeddings(self, new_embeddings):
self.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
[`~modeling_outputs.CausalLMOutputWithPast`]
comprising various elements depending on the configuration and inputs.
Example:
```python
>>> from transformers import AutoTokenizer, ModernBertDecoderForCausalLM
>>> model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec")
>>> tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
>>> prompt = "The capital of France is"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=1)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"The capital of France is Paris"
```
"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.decoder(self.lm_head(hidden_states[:, slice_indices, :]))
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
The ModernBert Decoder Model with a sequence classification head on top (linear layer).
[`ModernBertDecoderForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-1, GPT-2) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
| ModernBertDecoderForCausalLM |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 8738,
"end": 8787
} | class ____:
def m0(self, arg):
pass
| A20 |
python | getsentry__sentry | src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py | {
"start": 340,
"end": 543
} | class ____:
"""Both the function and path pattern must match for a frame to be considered a SDK frame."""
function_pattern: str
path_pattern: str
@dataclass(frozen=True)
| FunctionAndPathPattern |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/model_fallback.py | {
"start": 441,
"end": 4082
} | class ____(AgentMiddleware):
"""Automatic fallback to alternative models on errors.
Retries failed model calls with alternative models in sequence until
success or all models exhausted. Primary model specified in `create_agent`.
Example:
```python
from langchain.agents.middleware.model_fallback import ModelFallbackMiddleware
from langchain.agents import create_agent
fallback = ModelFallbackMiddleware(
"openai:gpt-4o-mini", # Try first on error
"anthropic:claude-sonnet-4-5-20250929", # Then this
)
agent = create_agent(
model="openai:gpt-4o", # Primary model
middleware=[fallback],
)
# If primary fails: tries gpt-4o-mini, then claude-sonnet-4-5-20250929
result = await agent.invoke({"messages": [HumanMessage("Hello")]})
```
"""
def __init__(
self,
first_model: str | BaseChatModel,
*additional_models: str | BaseChatModel,
) -> None:
"""Initialize model fallback middleware.
Args:
first_model: First fallback model (string name or instance).
*additional_models: Additional fallbacks in order.
"""
super().__init__()
# Initialize all fallback models
all_models = (first_model, *additional_models)
self.models: list[BaseChatModel] = []
for model in all_models:
if isinstance(model, str):
self.models.append(init_chat_model(model))
else:
self.models.append(model)
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
"""Try fallback models in sequence on errors.
Args:
request: Initial model request.
handler: Callback to execute the model.
Returns:
AIMessage from successful model call.
Raises:
Exception: If all models fail, re-raises last exception.
"""
# Try primary model first
last_exception: Exception
try:
return handler(request)
except Exception as e: # noqa: BLE001
last_exception = e
# Try fallback models
for fallback_model in self.models:
try:
return handler(request.override(model=fallback_model))
except Exception as e: # noqa: BLE001
last_exception = e
continue
raise last_exception
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
"""Try fallback models in sequence on errors (async version).
Args:
request: Initial model request.
handler: Async callback to execute the model.
Returns:
AIMessage from successful model call.
Raises:
Exception: If all models fail, re-raises last exception.
"""
# Try primary model first
last_exception: Exception
try:
return await handler(request)
except Exception as e: # noqa: BLE001
last_exception = e
# Try fallback models
for fallback_model in self.models:
try:
return await handler(request.override(model=fallback_model))
except Exception as e: # noqa: BLE001
last_exception = e
continue
raise last_exception
| ModelFallbackMiddleware |
python | ray-project__ray | python/ray/tune/execution/tune_controller.py | {
"start": 2318,
"end": 79008
} | class ____:
CKPT_FILE_TMPL = "experiment_state-{}.json"
RAISE = "RAISE"
def __init__(
self,
*,
search_alg: Optional[SearchAlgorithm] = None,
placeholder_resolvers: Optional[Dict[Tuple, Any]] = None,
scheduler: Optional[TrialScheduler] = None,
stopper: Optional[Stopper] = None,
resume_config: Optional[ResumeConfig] = None,
fail_fast: bool = False,
checkpoint_period: Union[str, int] = None,
callbacks: Optional[List[Callback]] = None,
metric: Optional[str] = None,
trial_checkpoint_config: Optional[CheckpointConfig] = None,
storage: Optional[StorageContext] = None,
reuse_actors: bool = False,
resource_manager_factory: Optional[Callable[[], ResourceManager]] = None,
_trainer_api: bool = False,
):
if resource_manager_factory:
resource_manager = resource_manager_factory()
else:
resource_manager = PlacementGroupResourceManager()
self._actor_manager = RayActorManager(resource_manager=resource_manager)
self._class_cache = _ActorClassCache()
# Resource status
self._resource_updater = _ResourceUpdater(None)
# Actor <-> Trial mappings
self._actor_to_trial: Dict[TrackedActor, Trial] = {}
self._trial_to_actor: Dict[Trial, TrackedActor] = {}
# Resources <-> Trial
self._resources_to_pending_trials: Dict[
ResourceRequest, Set[Trial]
] = defaultdict(set)
# Keep track of actor states
self._pending_trials: Set[Trial] = set()
self._pending_trials_list: List[Trial] = []
self._running_trials: Set[Trial] = set()
self._paused_trials: Set[Trial] = set()
self._stopped_trials: Set[Trial] = set()
self._failed_trials: Set[Trial] = set()
self._resetting_trials: Set[Trial] = set()
self._staged_trials: Set[Trial] = set()
# Removed actors
self._started_actors: Set[TrackedActor] = set()
# Map of tracked actors -> timestamp
# The timestamp is when we requested the stop.
# We track these actors here to force a
# cleanup after some time (as they might be hanging).
# Todo: This timeout logic should be moved into the actor manager.
# This map is populated whenever we request an actor stop:
# - Regular STOP decision
# - Removing an actor because its trial REUSEs a different trial's actor
# - Removing a cached actor because it's not needed anymore
# Actors are only tracked in this map if they actually started (not if they
# were only requested but never started).
# Actors are removed from this map:
# - When the STOP resolved and the actor actually stopped
# - When they are forcefully cleaned up after the timeout.
self._stopping_actors: Dict[TrackedActor, float] = {}
self._earliest_stopping_actor: float = float("inf")
self._actor_cleanup_timeout: int = int(
os.environ.get("TUNE_FORCE_TRIAL_CLEANUP_S", "600")
)
self._actor_force_cleanup_timeout: int = 10
# Reuse actors
self._reuse_actors = reuse_actors
self._actor_cache = _ObjectCache(may_keep_one=True)
# Trial metadata for experiment checkpoints
self._trials_to_cache: Set[Trial] = set()
self._trial_metadata: Dict[str, str] = {}
# TRAINING
self._buffer_length = int(os.getenv("TUNE_RESULT_BUFFER_LENGTH", 1))
self._buffer_min_time_s = float(os.getenv("TUNE_RESULT_BUFFER_MIN_TIME_S", 0.0))
self._buffer_max_time_s = float(
os.getenv("TUNE_RESULT_BUFFER_MAX_TIME_S", 100.0)
)
# Legacy TrialRunner init
self._search_alg = search_alg or BasicVariantGenerator()
self._placeholder_resolvers = placeholder_resolvers
self._scheduler_alg = scheduler or FIFOScheduler()
self._callbacks = CallbackList(callbacks or [])
self._insufficient_resources_manager = _InsufficientResourcesManager(
for_train=_trainer_api
)
self._pending_trial_queue_times = {}
self._max_pending_trials = _get_max_pending_trials(self._search_alg)
self._storage = storage
self._metric = metric
self._total_time = 0
self._iteration = 0
self._has_errored = False
self._fail_fast = fail_fast
if isinstance(self._fail_fast, str):
self._fail_fast = self._fail_fast.upper()
if self._fail_fast == self.RAISE:
warnings.warn(
"fail_fast='raise' detected. Be careful when using this "
"mode as resources (such as Ray processes, "
"file descriptors, and temporary files) may not be "
"cleaned up properly. To use "
"a safer mode, use fail_fast=True."
)
else:
raise ValueError(
"fail_fast must be one of {bool, RAISE}. " f"Got {self._fail_fast}."
)
self._print_trial_errors = bool(
int(os.environ.get("TUNE_PRINT_ALL_TRIAL_ERRORS", "1"))
)
self._trials: List[Trial] = []
self._live_trials: Set[Trial] = set() # Set of non-terminated trials
self._cached_trial_decisions = {}
self._queued_trial_decisions = {}
self._stop_queue = []
self._should_stop_experiment = False # used by TuneServer
self._stopper = stopper or NoopStopper()
self._start_time = time.time()
self._session_str = datetime.fromtimestamp(self._start_time).strftime(
"%Y-%m-%d_%H-%M-%S"
)
if checkpoint_period is None:
checkpoint_period = os.getenv("TUNE_GLOBAL_CHECKPOINT_S", "auto")
self._checkpoint_period = checkpoint_period
self._trial_checkpoint_config = trial_checkpoint_config or CheckpointConfig()
self._checkpoint_manager = self._create_checkpoint_manager()
self._resumed = False
if resume_config is not None:
# Use the metadata file to restore TuneController state
try:
self.resume(resume_config=resume_config)
self._resumed = True
except Exception as e:
if has_verbosity(Verbosity.V3_TRIAL_DETAILS):
logger.error(str(e))
logger.exception("Failed to restore the run state.")
if self._fail_fast:
raise
logger.info("Restarting experiment.")
else:
logger.debug("Starting a new experiment.")
def _wrapped(self):
"""Return wrapped tune controller to be passed to scheduler/searchers."""
return TrialRunnerWrapper(
self,
trial_executor=_FakeRayTrialExecutor(self),
runner_whitelist_attr={
"search_alg",
"get_trials",
"get_live_trials",
"_set_trial_status",
"pause_trial",
"stop_trial",
"_schedule_trial_save",
},
executor_whitelist_attr={
"has_resources_for_trial",
"pause_trial",
"save",
"_resource_updater",
},
)
@property
def resumed(self):
return self._resumed
@property
def search_alg(self):
return self._search_alg
@property
def scheduler_alg(self):
return self._scheduler_alg
def setup_experiments(
self, experiments: List[Experiment], total_num_samples: int
) -> None:
"""Obtains any necessary information from experiments.
Mainly used to setup callbacks.
Args:
experiments: List of Experiments
to use.
total_num_samples: Total number of samples
factoring in grid search samplers.
"""
experiment = experiments[0]
spec = experiment.public_spec if experiment else {}
spec["total_num_samples"] = total_num_samples
self._callbacks.setup(**spec)
def end_experiment_callbacks(self) -> None:
"""Calls ``on_experiment_end`` method in callbacks."""
self._callbacks.on_experiment_end(trials=self._trials)
@property
def experiment_state_file_name(self) -> str:
return self.CKPT_FILE_TMPL.format(self._session_str)
@property
def experiment_state_path(self) -> str:
"""Returns the local experiment checkpoint path."""
return Path(
self._storage.experiment_driver_staging_path,
self.experiment_state_file_name,
).as_posix()
@property
def experiment_path(self) -> str:
return self._storage.experiment_fs_path
def _create_checkpoint_manager(self):
return _ExperimentCheckpointManager(
storage=self._storage,
checkpoint_period=self._checkpoint_period,
sync_every_n_trial_checkpoints=self._trial_checkpoint_config.num_to_keep,
)
def save_to_dir(self):
"""Save TuneController state to the local staging experiment directory.
This includes:
- trial states
- TuneController internal state (all the serializable attributes)
- the searcher state
- the callback states
"""
# Get state from trial executor and runner
runner_state = {
# Trials
"trial_data": list(self._get_trial_checkpoints().values()),
# Experiment data
"runner_data": self.__getstate__(),
# Metadata
"stats": {"start_time": self._start_time},
}
driver_staging_path = self._storage.experiment_driver_staging_path
os.makedirs(driver_staging_path, exist_ok=True)
with open(
Path(driver_staging_path, self.experiment_state_file_name),
"w",
) as f:
json.dump(runner_state, f, cls=TuneFunctionEncoder)
self._search_alg.save_to_dir(driver_staging_path, session_str=self._session_str)
self._callbacks.save_to_dir(driver_staging_path, session_str=self._session_str)
def checkpoint(self, force: bool = False, wait: bool = False):
self._checkpoint_manager.sync_up_experiment_state(
save_fn=self.save_to_dir, force=force, wait=wait
)
def _requeue_restored_trials(
self, trials: List[Trial], resume_config: ResumeConfig
):
# Set trial statuses according to the resume configuration
for trial in sorted(
trials, key=lambda t: t.run_metadata.last_result_time, reverse=True
):
if trial.status == Trial.ERROR:
resume_type = resume_config.errored
elif trial.status == Trial.TERMINATED:
resume_type = resume_config.finished
else: # Unfinished (PENDING, RUNNING, PAUSED)
resume_type = resume_config.unfinished
trial_to_add = None
if resume_type == ResumeConfig.ResumeType.RESUME:
# Keep trial ID on resume
trial_to_add = trial
trial_to_add.run_metadata.error_filename = None
trial_to_add.run_metadata.pickled_error_filename = None
trial_to_add.set_status(Trial.PENDING)
elif resume_type == ResumeConfig.ResumeType.RESTART:
trial_to_add = trial.reset()
trial_to_add.restore_path = None
elif resume_type == ResumeConfig.ResumeType.SKIP:
trial_to_add = trial
if trial_to_add.status != Trial.ERROR:
# Set the status to terminated to skip it.
# Keep errored trial status as ERROR.
trial_to_add.set_status(Trial.TERMINATED)
else:
raise ValueError(f"Unknown resume type: {resume_type}")
assert trial_to_add is not None
self.add_trial(trial_to_add)
def _restore_trials(self, experiment_state: Dict) -> List[Trial]:
trials = []
for trial_json_state, trial_runtime_metadata in experiment_state["trial_data"]:
trial = Trial.from_json_state(trial_json_state)
trial.restore_run_metadata(trial_runtime_metadata)
# The following properties may be updated on restoration
# Ex: moved local/cloud experiment directory
# Propagate updated storage ctx properties to the trial's restored copy.
new_storage = copy.copy(trial.storage)
new_storage.storage_filesystem = self._storage.storage_filesystem
new_storage.storage_fs_path = self._storage.storage_fs_path
new_storage.experiment_dir_name = self._storage.experiment_dir_name
# ATTN: `trial.set_storage` is used intentionally, since it
# also updates the absolute paths and filesystem of tracked checkpoints.
trial.set_storage(new_storage)
# Avoid creating logdir in client mode for returned trial results,
# since the dir might not be creatable locally.
# TODO(ekl) this is kind of a hack.
if not ray.util.client.ray.is_connected():
trial.init_local_path() # Create logdir if it does not exist
trials.append(trial)
# NOTE: The restored run should reuse the same driver staging directory.
self._storage._timestamp = trials[0].storage._timestamp
return trials
def resume(self, resume_config: ResumeConfig):
"""Resumes all checkpointed trials from previous run.
Requires user to manually re-register their objects. Also stops
all ongoing trials.
"""
# 1. Restore TuneController state
# Find newest state file
newest_state_path = _find_newest_experiment_checkpoint(
self._storage.experiment_fs_path, fs=self._storage.storage_filesystem
)
if newest_state_path is None:
raise ValueError(
f"Tried to resume experiment from directory "
f"'{self._storage.experiment_fs_path}', but no "
f"experiment state file of the form '{TuneController.CKPT_FILE_TMPL}' "
"was found. This is expected if you are launching a new experiment."
)
logger.info(
"Restoring the run from the latest experiment state file: "
f"{Path(newest_state_path).name}"
)
with self._storage.storage_filesystem.open_input_stream(newest_state_path) as f:
experiment_state = json.loads(f.readall(), cls=TuneFunctionDecoder)
self.__setstate__(experiment_state["runner_data"])
# 2. Get the trial states that the run left off at.
trials = self._restore_trials(experiment_state)
# 3. Restore search algorithm and callback state
# Download the search algorithm and callback state to the driver staging dir.
self._checkpoint_manager.sync_down_experiment_state()
driver_staging_dir = self._storage.experiment_driver_staging_path
if self._search_alg.has_checkpoint(driver_staging_dir):
self._search_alg.restore_from_dir(driver_staging_dir)
if self._callbacks.can_restore(driver_staging_dir):
self._callbacks.restore_from_dir(driver_staging_dir)
# 4. Re-queue trials as needed, depending on their status.
self._requeue_restored_trials(trials, resume_config)
def update_max_pending_trials(self, max_pending_trials: Optional[int] = None):
self._max_pending_trials = max_pending_trials or _get_max_pending_trials(
self._search_alg
)
def update_pending_trial_resources(
self, resources: Union[dict, PlacementGroupFactory]
):
"""Update trial resources when resuming from checkpoint.
Only updating the pending ones.
"""
assert resources
if isinstance(resources, dict) and "gpu" not in resources:
resources["gpu"] = 0
for trial in self._trials:
if trial.status == Trial.PENDING:
trial.update_resources(resources=resources)
def is_finished(self):
"""Returns whether all trials have finished running."""
# The checks here are partly redundant but optimized for quick
# evaluation. Specifically, if there are live trials, we check
# these live trials first. Only if none of the live trials is
# live anymore do we loop over all trials for a final check.
trials_done = (
len(self._live_trials) == 0
or all(trial.is_finished() for trial in self._live_trials)
) and all(trial.is_finished() for trial in self._trials)
return trials_done and self._search_alg.is_finished()
def get_trial(self, tid):
trial = [t for t in self._trials if t.trial_id == tid]
return trial[0] if trial else None
def get_trials(self):
"""Returns the list of trials managed by this TrialRunner.
Note that the caller usually should not mutate trial state directly.
"""
return self._trials
def get_live_trials(self):
"""Returns the set of trials that are not in Trial.TERMINATED state."""
return self._live_trials
def add_trial(self, trial: Trial):
"""Adds a new trial to this TrialRunner.
Trials may be added at any time.
Args:
trial: Trial to queue.
"""
# If the config map has had all the references replaced with placeholders,
# resolve them before adding the trial.
if self._placeholder_resolvers:
trial.resolve_config_placeholders(self._placeholder_resolvers)
# With trial.config resolved, create placement group factory if needed.
trial.create_placement_group_factory()
self._trials.append(trial)
if trial.status != Trial.TERMINATED:
self._live_trials.add(trial)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(self._wrapped(), trial)
self._mark_trial_to_checkpoint(trial)
logger.debug(f"Adding trial {trial} with status {trial.status}")
status_str_map = {
Trial.PENDING: self._pending_trials,
Trial.RUNNING: self._running_trials,
Trial.PAUSED: self._paused_trials,
Trial.TERMINATED: self._stopped_trials,
Trial.ERROR: self._failed_trials,
}
status_str_map[trial.status].add(trial)
if trial.status == Trial.PENDING:
self._pending_trials_list.append(trial)
self._resources_to_pending_trials[trial.placement_group_factory].add(trial)
def _update_trial_queue(self, blocking: bool = False, timeout: int = 600) -> bool:
"""Adds next trials to queue if possible.
Note that the timeout is currently unexposed to the user.
Args:
blocking: Blocks until either a trial is available
or is_finished (timeout or search algorithm finishes).
timeout: Seconds before blocking times out.
Returns:
Boolean indicating if a new trial was created or not.
"""
trial = self._search_alg.next_trial()
if blocking and not trial:
start = time.time()
# Checking `is_finished` instead of _search_alg.is_finished
# is fine because blocking only occurs if all trials are
# finished and search_algorithm is not yet finished
while (
not trial and not self.is_finished() and time.time() - start < timeout
):
logger.debug("Blocking for next trial...")
trial = self._search_alg.next_trial()
time.sleep(1)
if trial:
self.add_trial(trial)
return True
return False
def _used_resources_string(self) -> str:
allocated_resources = self._actor_manager.get_live_actors_resources()
return self._resource_updater.debug_string(allocated_resources)
def on_step_begin(self):
self._resource_updater.update_avail_resources()
def on_step_end(self):
self._cleanup_cached_actors(force_all=False)
self._cleanup_stopping_actors(force_all=False)
def _cleanup_cached_actors(self, force_all: bool = False):
if (
self._search_alg.is_finished()
and not self._staged_trials
and self._actor_cache.total_max_objects == 0
):
# If there are no more trials coming in, no trials are pending execution,
# and we don't explicitly want to cache objects, we can evict the full
# cache.
force_all = True
for tracked_actor in self._actor_cache.flush_cached_objects(
force_all=force_all
):
logger.debug(f"Cleaning up cached actor: {tracked_actor}")
# Unset termination callbacks as no trial is associated
tracked_actor.set_on_stop(None)
tracked_actor.set_on_error(None)
self._remove_actor(tracked_actor=tracked_actor)
def _cleanup_stopping_actors(self, force_all: bool = False):
now = time.monotonic()
if (
not force_all
and now - self._earliest_stopping_actor <= self._actor_cleanup_timeout
):
# If the earliest actor to timeout has not reached the timeout, return
return
# This is a bit costly, so we want to avoid running it too often
times = deque(
sorted(
[
(timestamp, tracked_actor)
for tracked_actor, timestamp in self._stopping_actors.items()
],
key=lambda item: item[0],
)
)
while times and (
force_all or time.monotonic() - times[0][0] > self._actor_cleanup_timeout
):
if (
time.monotonic() - times[0][0] < self._actor_force_cleanup_timeout
) and self._actor_manager.is_actor_started(tracked_actor=times[0][1]):
# Even if force_all=True, we give the actors time to clean up
self._actor_manager.next(timeout=1)
continue
_, tracked_actor = times.popleft()
if tracked_actor not in self._stopping_actors:
# Actor stopping has been handled by the block above
continue
if self._actor_manager.is_actor_started(tracked_actor=tracked_actor):
logger.debug(f"Forcefully killing actor: {tracked_actor}")
self._actor_manager.remove_actor(tracked_actor=tracked_actor, kill=True)
self._stopping_actors.pop(tracked_actor)
if times:
self._earliest_stopping_actor = times[0][0]
else:
self._earliest_stopping_actor = float("inf")
def step(self):
if self.is_finished():
raise TuneError("Called step when all trials finished?")
with warn_if_slow("on_step_begin"):
self.on_step_begin()
with warn_if_slow("callbacks.on_step_begin"):
self._callbacks.on_step_begin(
iteration=self._iteration, trials=self._trials
)
# Ask searcher for more trials
self._maybe_update_trial_queue()
# Start actors for added trials
self._maybe_add_actors()
# Handle one event
if not self._actor_manager.next(timeout=0.1):
# If there are no actors running, warn about potentially
# insufficient resources
if not self._actor_manager.num_live_actors:
self._insufficient_resources_manager.on_no_available_trials(
self.get_trials()
)
# Maybe stop whole experiment
self._stop_experiment_if_needed()
# Maybe save experiment state
try:
self.checkpoint()
except Exception as e:
logger.warning(f"Trial controller checkpointing failed: {str(e)}")
raise e
self._iteration += 1
with warn_if_slow("on_step_end"):
self.on_step_end()
with warn_if_slow("callbacks.on_step_end"):
self._callbacks.on_step_end(iteration=self._iteration, trials=self._trials)
def _set_trial_status(self, trial: Trial, status: str):
"""Set trial to a specific status.
This will keep track of trials with specific statuses in sets.
For PENDING and PAUSED trials we also keep a list of trials to be able
to retain FIFO ordering. See ``_maybe_add_actors`` for details.
Lastly we also keep a mapping from resources to pending/paused trials
to be able to efficiently start trials for cached actors.
"""
current_status = trial.status
if current_status == status:
logger.debug(f"Trial {trial} already has status {status}. Skipping update.")
return
status_str_map = {
Trial.PENDING: self._pending_trials,
Trial.RUNNING: self._running_trials,
Trial.PAUSED: self._paused_trials,
Trial.TERMINATED: self._stopped_trials,
Trial.ERROR: self._failed_trials,
}
logger.debug(
f"Setting status for trial {trial} from {current_status} to {status}"
)
assert trial in status_str_map[current_status], (trial, current_status)
assert trial not in status_str_map[status], (trial, status)
status_str_map[current_status].remove(trial)
status_str_map[status].add(trial)
# We keep a log for pending trials for FIFO scheduling.
# We do not need to remove from this list as we will just discard
# items that are in this list but not in the respective set.
if status == Trial.PENDING:
self._pending_trials_list.append(trial)
self._resources_to_pending_trials[trial.placement_group_factory].add(trial)
else:
self._resources_to_pending_trials[trial.placement_group_factory].discard(
trial
)
trial.set_status(status)
def _get_trial_checkpoints(self) -> Dict[str, str]:
for trial in self._trials_to_cache:
self._trial_metadata[trial.trial_id] = trial.get_json_state()
self._trials_to_cache.clear()
return self._trial_metadata
def _mark_trial_to_checkpoint(self, trial: Trial):
self._trials_to_cache.add(trial)
###
# UPDATE TRIALS
def _maybe_update_trial_queue(self):
"""Ask the searcher for more trials."""
if self._search_alg.is_finished():
return
dont_wait_for_trial = (
self._pending_trials or self._running_trials or self._paused_trials
)
while len(self._pending_trials) < self._max_pending_trials:
if not self._update_trial_queue(blocking=not dont_wait_for_trial):
break
dont_wait_for_trial = True
def _cleanup_trials(self):
logger.debug("CLEANING UP all trials")
for tracked_actor in list(self._actor_to_trial):
trial = self._actor_to_trial[tracked_actor]
logger.debug(
f"Scheduling trial stop at end of experiment (trial {trial}): "
f"{tracked_actor}"
)
self._schedule_trial_stop(trial)
# Clean up cached actors now
self._cleanup_cached_actors(force_all=True)
start = time.monotonic()
while time.monotonic() - start < 5 and self._actor_manager.num_total_actors:
if _dedup_logs("actor_manager_cleanup", str(start)):
logger.debug(
"Waiting for actor manager to clean up final state [dedup]"
)
self._actor_manager.next(timeout=1)
logger.debug("Force cleanup of remaining actors")
self._cleanup_stopping_actors(force_all=True)
self._actor_manager.cleanup()
def _remove_actor(self, tracked_actor: TrackedActor):
stop_future = self._actor_manager.schedule_actor_task(
tracked_actor, "stop", _return_future=True
)
now = time.monotonic()
if self._actor_manager.remove_actor(
tracked_actor, kill=False, stop_future=stop_future
):
# If the actor was previously alive, track
self._stopping_actors[tracked_actor] = now
self._earliest_stopping_actor = min(self._earliest_stopping_actor, now)
###
# ADD ACTORS
def _maybe_add_actors(self) -> None:
"""Add actors for pending and paused trials.
For actors that have not been staged, yet, we request an actor.
For actors that have been staged, already, we try to reuse a cached actor.
First, we handle the trial that the scheduler chooses to run.
Then, we handle all trials that are pending.
Lastly, we see if we have cached actors that we can assign to a pending or
paused trial. This can be the case when a trial has not been staged, yet,
for instance because the number of staging trials was too large.
"""
###
# 1: Start trial that the scheduler wants to run
with warn_if_slow("choose_trial_to_run"):
trial_to_run = self._scheduler_alg.choose_trial_to_run(self._wrapped())
if trial_to_run:
if _dedup_logs("trial_to_run_chosen", trial_to_run.trial_id):
logger.debug(
f"Chose trial to run from scheduler: {trial_to_run} [dedup]"
)
if (
trial_to_run not in self._staged_trials
and trial_to_run not in self._trial_to_actor
):
logger.debug(f"Staging trial to run: {trial_to_run}")
self._set_trial_status(trial_to_run, Trial.PENDING)
self._staged_trials.add(trial_to_run)
self._actor_cache.increase_max(trial_to_run.placement_group_factory)
# schedule_trial_actor also potentially uses cached actors
self._schedule_trial_actor(trial_to_run)
else:
# Otherwise, only try to use the cached actor
if _dedup_logs("trial_to_run_reuse", trial_to_run.trial_id):
logger.debug(
f"Trying to re-use actor for trial to run: {trial_to_run} "
f"[dedup]"
)
self._maybe_reuse_cached_actor(trial_to_run)
###
# 2: Start trials that are PENDING
def _maybe_add_actors(candidates: List[Trial]):
new_candidates = []
while candidates:
if self._actor_manager.num_pending_actors >= self._max_pending_trials:
break
trial = candidates.pop(0)
# If the trial is part of the list, but not of the set,
# we just ignore it. Removing it from the list on status
# change is too expensive.
if trial not in self._pending_trials:
continue
if trial in self._trial_to_actor:
new_candidates.append(trial)
continue
if trial in self._staged_trials:
self._maybe_reuse_cached_actor(trial)
continue
logger.debug(f"Scheduling actor for enqueued trial: {trial}")
self._staged_trials.add(trial)
self._actor_cache.increase_max(trial.placement_group_factory)
self._schedule_trial_actor(trial)
return new_candidates + candidates
self._pending_trials_list = _maybe_add_actors(self._pending_trials_list)
###
# 3: Start any trial that can be started with a cached actor
if self._actor_cache.num_cached_objects:
for resource in self._resources_to_pending_trials:
if not self._resources_to_pending_trials[resource]:
continue
if not self._actor_cache.has_cached_object(resource):
continue
start_trial = self._resources_to_pending_trials[resource].pop()
logger.debug(
f"Trying to re-use actor for enqueued trial: {start_trial}"
)
if not self._maybe_reuse_cached_actor(start_trial):
self._resources_to_pending_trials[resource].add(start_trial)
else:
if start_trial not in self._staged_trials:
self._staged_trials.add(start_trial)
self._actor_cache.increase_max(
start_trial.placement_group_factory
)
    def _maybe_reuse_cached_actor(self, trial: Trial) -> bool:
        """Maybe reuse a cached actor for a trial.

        If an actor has been scheduled for the trial already,
        this will remove the original actor.

        Returns:
            True if a cached actor was assigned to the trial (or a reset
            onto one is already in flight), False if no matching actor
            was cached.
        """
        if trial in self._resetting_trials:
            # A reset onto a reused actor is already in progress for this trial.
            return True
        resource_request = trial.placement_group_factory
        if not self._actor_cache.has_cached_object(resource_request):
            return False
        cached_actor = self._actor_cache.pop_cached_object(resource_request)
        logger.debug(f"Reusing ACTOR for trial {trial}: {cached_actor}")
        if trial in self._trial_to_actor:
            # The trial already had an actor scheduled: drop it in favor of
            # the cached one so we don't leak resources.
            original_actor = self._trial_to_actor.pop(trial)
            self._actor_to_trial.pop(original_actor)
            logger.debug(f"Removing ORIGINAL ACTOR for trial {trial}: {original_actor}")
            self._remove_actor(tracked_actor=original_actor)
        self._trial_to_actor[trial] = cached_actor
        self._actor_to_trial[cached_actor] = trial
        # Todo: get rid of Trial.runner
        ray_actor = self._actor_manager._live_actors_to_ray_actors_resources[
            cached_actor
        ][0]
        trial.set_ray_actor(ray_actor)
        # The reused actor must be reset to this trial's config before training.
        self._schedule_trial_reset(trial, trial.config, trial.experiment_tag)
        return True
    def _schedule_trial_actor(self, trial: Trial):
        """Schedule an actor for a trial.

        If a cached actor is available, use it. Otherwise, request a
        new actor.

        Raises:
            RuntimeError: If an actor is already associated with the trial.
        """
        logger.debug(f"Trying to schedule new ACTOR for trial {trial}")
        assert trial.status == Trial.PENDING
        trial.init_local_path()
        # We checkpoint metadata here to try mitigating logdir duplication
        self._mark_trial_to_checkpoint(trial)
        if self._maybe_reuse_cached_actor(trial):
            # A cached actor was assigned; no new actor request is needed.
            return
        # Safeguard
        if trial in self._trial_to_actor:
            raise RuntimeError(
                f"Tried to request a new actor for trial {trial}, but an old "
                f"actor still exists. This can lead to leaked resources. The old "
                f"actor should be removed first. "
                f"This is an internal problem in Ray Tune. If you encounter this "
                f"error, please raise an issue on "
                f"https://github.com/ray-project/ray/issues"
            )
        trainable_cls = trial.get_trainable_cls()
        if not trainable_cls:
            # Unresolvable trainable: fail only this trial instead of crashing.
            exception = _AbortTrialExecution(
                f"Invalid trainable: {trial.trainable_name}. If you passed "
                f"a string, make sure the trainable was registered before."
            )
            trial.handle_error(exception)
            self._schedule_trial_stop(trial, exception=exception)
            return
        _actor_cls = self._class_cache.get(trainable_cls)
        trial.set_location(_Location())
        trainable_kwargs = _get_trainable_kwargs(trial=trial)
        with _change_working_directory(trial):
            tracked_actor = self._actor_manager.add_actor(
                cls=_actor_cls,
                resource_request=trial.placement_group_factory,
                kwargs=trainable_kwargs,
                on_start=self._actor_started,
                on_stop=self._actor_stopped,
                on_error=self._actor_failed,
            )
        self._trial_to_actor[trial] = tracked_actor
        self._actor_to_trial[tracked_actor] = trial
        logger.debug(
            f"Scheduled new ACTOR for trial {trial}: {tracked_actor}. "
            f"Resources: {trial.placement_group_factory}"
        )
    def _unstage_trial_with_resources(self, trial: Trial):
        """Unstage trial, or one with the same resources as ``trial``.

        Invoked once an actor started: the consumed resources correspond to
        exactly one staged trial, which must be un-staged to keep the actor
        cache's accounting balanced.

        Raises:
            RuntimeError: If neither ``trial`` nor any staged trial with the
                same resource request is found.
        """
        # Case 1: The trial we started was staged. Just remove it
        if trial in self._staged_trials:
            self._staged_trials.remove(trial)
            self._actor_cache.decrease_max(trial.placement_group_factory)
            return
        # Case 2: We staged a trial "A" with the same resources, but our trial "B"
        # was selected by the scheduler to run. The resource manager does not care
        # about "trials", it just cares about resources being available. Thus we
        # look for a staged trial with the same resource requirements and remove it
        resource_request = trial.placement_group_factory
        # Remove staged trial with same resource requirements
        candidate_trial = None
        for staged_trial in self._staged_trials:
            staged_resources = staged_trial.placement_group_factory
            if staged_resources == resource_request:
                candidate_trial = staged_trial
                break
        if candidate_trial:
            self._staged_trials.remove(candidate_trial)
            self._actor_cache.decrease_max(candidate_trial.placement_group_factory)
            return
        raise RuntimeError(
            "Started a trial with resources requested by a different trial, but "
            "this trial was lost. This is an error in Ray Tune's execution "
            "logic. Please raise a GitHub issue at "
            "https://github.com/ray-project/ray/issues"
        )
    def _maybe_cache_trial_actor(self, trial: Trial) -> bool:
        """Cache trial actor for reuse, if needed.

        We will only cache as many actors as are needed to fulfill any pending
        resource requests for actors with the same resource requirements.
        E.g. if we have 6 running trials and 4 additional staged actors, we will only
        cache up to 4 of the running trial actors when they finish.

        One exception is the case when we have no cached actors, yet. In that case,
        we will always cache the actor in this method.

        Later, in `_cleanup_cached_actors`, we will check again if we need this cached
        actor. That method will keep the actor if we don't have any staged trials,
        because we don't know at that point if the next trial might require the same
        resources. But because there is no staged trial, it is safe to keep the actor
        around, as it won't occupy resources needed by another trial until it's staged.

        Returns:
            True if the actor was cached (and detached from the trial),
            False otherwise.
        """
        if not self._reuse_actors:
            return False
        if self._search_alg.is_finished() and not self._staged_trials:
            logger.debug(
                f"Not caching actor of trial {trial} as the search is over "
                f"and no more trials are staged."
            )
            return False
        tracked_actor = self._trial_to_actor[trial]
        if (
            not self._actor_manager.is_actor_started(tracked_actor)
            or self._actor_manager.is_actor_failed(tracked_actor)
            or tracked_actor not in self._started_actors
        ):
            logger.debug(
                f"Not caching actor of trial {trial} as it has not been started, yet: "
                f"{tracked_actor}"
            )
            return False
        if not self._actor_cache.cache_object(
            trial.placement_group_factory, tracked_actor
        ):
            logger.debug(
                f"Could not cache actor of trial {trial} for "
                "reuse, as there are no pending trials "
                "requiring its resources."
            )
            return False
        logger.debug(f"Caching actor of trial {trial} for re-use: {tracked_actor}")
        # Detach the actor from this trial; a later trial will pick it up.
        tracked_actor = self._trial_to_actor.pop(trial)
        self._actor_to_trial.pop(tracked_actor)
        trial.set_ray_actor(None)
        return True
    def _actor_started(self, tracked_actor: TrackedActor, log: str = "STARTED"):
        """Callback fired when a trial's Ray actor came up (or was reused).

        Args:
            tracked_actor: Actor that started.
            log: Verb used in the debug log line ("STARTED" or "REUSED",
                the latter passed from ``_on_trial_reset``).
        """
        self._started_actors.add(tracked_actor)
        trial = self._actor_to_trial[tracked_actor]
        logger.debug(f"Actor {log} for trial {trial}: {tracked_actor}")
        # Balance the staging bookkeeping for the resources now consumed.
        self._unstage_trial_with_resources(trial)
        ray_actor = self._actor_manager._live_actors_to_ray_actors_resources[
            tracked_actor
        ][0]
        trial.set_ray_actor(ray_actor)
        self._callbacks.on_trial_start(
            iteration=self._iteration, trials=self._trials, trial=trial
        )
        self._set_trial_status(trial, Trial.RUNNING)
        self._mark_trial_to_checkpoint(trial)
        # Restore from an existing checkpoint if there is one; otherwise
        # start training right away.
        if not self._schedule_trial_restore(trial):
            self._schedule_trial_train(trial)
def _actor_stopped(self, tracked_actor: TrackedActor):
if tracked_actor in self._actor_to_trial:
trial = self._actor_to_trial.pop(tracked_actor)
logger.debug(f"Actor STOPPED for trial {trial}: {tracked_actor}")
self._trial_to_actor.pop(trial)
trial.set_ray_actor(None)
logger.debug(f"Actor STOPPED: {tracked_actor}")
self._stopping_actors.pop(tracked_actor, None)
self._started_actors.discard(tracked_actor)
    def _actor_failed(self, tracked_actor: TrackedActor, exception: Exception):
        """Callback fired when a trial's Ray actor failed to start or crashed.

        Routes the error through trial failure handling, then removes the
        actor and all associated bookkeeping.
        """
        trial = self._actor_to_trial[tracked_actor]
        logger.debug(
            f"Actor FAILED for trial {trial}: {tracked_actor}. "
            f"Exception: {exception}"
        )
        if trial in (self._pending_trials | self._paused_trials):
            # First, set to running (needed downstream in _process_trial_failure)
            self._set_trial_status(trial, Trial.RUNNING)
            logger.debug(
                f"Trial {trial} failed in its creation task. Unstaging "
                f"to allow it to be re-scheduled."
            )
            self._unstage_trial_with_resources(trial)
        self._trial_task_failure(trial, exception=exception)
        self._actor_manager.clear_actor_task_futures(tracked_actor)
        # Clean up actor
        tracked_actor.set_on_stop(None)
        tracked_actor.set_on_error(None)
        self._actor_manager.remove_actor(tracked_actor, kill=False)
        # Trigger actor stopped callback
        self._actor_stopped(tracked_actor)
def _schedule_trial_task(
self,
trial: Trial,
method_name: str,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
on_result: Optional[Callable[[Trial, Any], None]] = None,
on_error: Optional[Callable[[Trial, Exception], None]] = None,
_return_future: bool = False,
) -> Optional[ray.ObjectRef]:
"""Schedule an actor task future for a trial.
This is a wrapper around ``ActorManager.schedule_actor_task``. This method
retrieves the tracked actor for a trial to kick off the task.
It also wraps around the callbacks, retrieving the trial object given the
tracked actor.
"""
tracked_actor = self._trial_to_actor[trial]
_on_result = None
_on_error = None
args = args or tuple()
kwargs = kwargs or {}
if on_result:
def _on_result(tracked_actor: TrackedActor, *args, **kwargs):
assert trial == self._actor_to_trial[tracked_actor]
logger.debug(
f"Future {method_name.upper()} RESOLVED for trial {trial}: "
f"{args}, {kwargs}"
)
try:
on_result(trial, *args, **kwargs)
except Exception as e:
logger.debug(
f"Error handling {method_name.upper()} result "
f"for trial {trial}: {e}"
)
if e is TuneError or self._fail_fast == self.RAISE:
raise e
else:
raise TuneError(traceback.format_exc())
if on_error:
def _on_error(tracked_actor: TrackedActor, exception: Exception):
# If the actor failed, it has already been cleaned up.
if tracked_actor not in self._actor_to_trial:
assert isinstance(exception, RayActorError), type(exception)
else:
assert trial == self._actor_to_trial[tracked_actor]
logger.debug(
f"Future {method_name.upper()} FAILED for trial {trial}: "
f"{exception}"
)
try:
on_error(trial, exception)
except Exception as e:
logger.debug(
f"Error handling {method_name.upper()} failure "
f"for trial {trial}: {e}"
)
if e is TuneError or self._fail_fast == self.RAISE:
raise e
else:
raise TuneError(traceback.format_exc())
logger.debug(f"Future {method_name.upper()} SCHEDULED for trial {trial}")
with _change_working_directory(trial):
future = self._actor_manager.schedule_actor_task(
tracked_actor=tracked_actor,
method_name=method_name,
args=args,
kwargs=kwargs,
on_result=_on_result,
on_error=_on_error,
_return_future=_return_future,
)
if _return_future:
return future
def _queue_decision(self, trial, decision):
# Get old decision, setting it to the current decision if it isn't set
old_decision = self._queued_trial_decisions.setdefault(trial.trial_id, decision)
# Stopping always takes precedence. If we decided to stop, just quit
if old_decision is TrialScheduler.STOP:
return
# The old decision wasn't STOP. We update the decision only if it is
# STOP or PAUSE. The action will only be CONTINUE if it was set by
# the first received result and was never updated after that.
if decision is TrialScheduler.STOP or decision is TrialScheduler.PAUSE:
self._queued_trial_decisions[trial.trial_id] = decision
def _execute_action(self, trial: Trial, decision: str, after_save: bool = False):
"""Executes action based on decision.
Args:
trial: Trial to act on.
decision: Scheduling decision to undertake.
"""
if decision == TrialScheduler.CONTINUE:
self._schedule_trial_train(trial)
elif decision == TrialScheduler.PAUSE:
self.pause_trial(trial, should_checkpoint=not after_save)
elif decision == TrialScheduler.STOP:
self.stop_trial(trial)
elif decision == TrialScheduler.NOOP:
pass
else:
raise ValueError("Invalid decision: {}".format(decision))
def _maybe_execute_queued_decision(self, trial: Trial, after_save: bool = False):
# `self._queued_trial_decisions` now contains a final decision
# based on all results
final_decision = self._queued_trial_decisions.pop(trial.trial_id, None)
if final_decision:
logger.debug(
f"Executing final queued decision for {trial}: {final_decision}"
)
self._execute_action(trial, final_decision, after_save=after_save)
def _stop_experiment_if_needed(self):
"""Stops all trials."""
fail_fast = self._fail_fast and self._has_errored
if self._stopper.stop_all() or fail_fast or self._should_stop_experiment:
self._search_alg.set_finished()
[
self._schedule_trial_stop(t)
for t in self._trials
if t.status not in {Trial.ERROR, Trial.TERMINATED}
]
###
# Failure
def _trial_task_failure(self, trial: Trial, exception: Exception):
if self._fail_fast == self.RAISE:
raise exception
else:
if self._print_trial_errors:
logger.error(f"Trial task failed for trial {trial}", exc_info=exception)
self._process_trial_failure(trial, exception=exception)
    def _process_trial_failure(
        self,
        trial: Trial,
        exception: Union[TuneError, RayTaskError, RayActorError],
    ):
        """Handle trial failure.

        Attempt trial recovery if possible, clean up state otherwise.

        Args:
            trial: Failed trial.
            exception: Exception prior to invoking this method.
        """
        self._has_errored = True
        trial.handle_error(exception)
        if trial.status == Trial.RUNNING and trial.should_recover():
            # The trial has retries left: requeue it instead of failing.
            self._try_recover(trial, exc=exception)
            self._callbacks.on_trial_recover(
                iteration=self._iteration, trials=self._trials, trial=trial
            )
        elif trial.status in {Trial.RUNNING, Trial.PENDING}:
            # No recovery possible: mark errored and notify all components.
            self._scheduler_alg.on_trial_error(self, trial)
            self._search_alg.on_trial_complete(trial.trial_id, error=True)
            self._schedule_trial_stop(trial, exception=exception)
            self._callbacks.on_trial_error(
                iteration=self._iteration, trials=self._trials, trial=trial
            )
    def _schedule_trial_stop(self, trial: Trial, exception: Optional[Exception] = None):
        """Request the trial's actor to stop and update the trial's status.

        Args:
            trial: Trial to stop.
            exception: If set, the trial is marked ERROR instead of TERMINATED.
        """
        if trial.status == Trial.ERROR:
            logger.debug(f"Not requesting trial STOP as it is ERROR already: {trial}")
            return
        logger.debug(f"Requesting to STOP actor for trial {trial}")
        if trial.is_saving:
            logger.debug(
                f"Trial {trial} is currently saving/pausing. Scheduling STOP after "
                f"save resolved."
            )
            self._cached_trial_decisions[trial.trial_id] = TrialScheduler.STOP
            trial.temporary_state.saving_to = None
        trial.temporary_state.restoring_from = None
        self._set_trial_status(trial, Trial.ERROR if exception else Trial.TERMINATED)
        trial.set_location(_Location())
        if trial not in self._trial_to_actor:
            logger.debug(f"Will not STOP trial actor as it is not live: {trial}")
            return
        tracked_actor = self._trial_to_actor[trial]
        self._actor_manager.clear_actor_task_futures(tracked_actor=tracked_actor)
        self._mark_trial_to_checkpoint(trial)
        # On a clean stop, try to keep the actor around for reuse.
        if not exception and self._maybe_cache_trial_actor(trial):
            # Trial runner has been cached
            return
        logger.debug(f"Terminating actor for trial {trial}: {tracked_actor}")
        tracked_actor = self._trial_to_actor.pop(trial)
        self._actor_to_trial.pop(tracked_actor)
        trial.set_ray_actor(None)
        self._remove_actor(tracked_actor=tracked_actor)
def stop_trial(self, trial):
"""The canonical implementation of stopping a trial.
Trials may be in any external status when this function is called.
If trial is in state PENDING or PAUSED, calls `on_trial_remove` for
scheduler and `on_trial_complete()` for search_alg.
If trial is in state RUNNING, calls `on_trial_complete` for scheduler
and search_alg if RUNNING. Caller to ensure that there is no
outstanding future to be handled for the trial. If there is, the future
would be discarded.
"""
try:
if trial.status in [Trial.ERROR, Trial.TERMINATED]:
return
elif trial.status in [Trial.PENDING, Trial.PAUSED]:
self._scheduler_alg.on_trial_remove(self, trial)
self._search_alg.on_trial_complete(trial.trial_id)
elif trial.status is Trial.RUNNING:
# By this time trial.last_result should have been
# updated already.
self._scheduler_alg.on_trial_complete(
self, trial, flatten_dict(trial.last_result)
)
self._search_alg.on_trial_complete(
trial.trial_id, result=flatten_dict(trial.last_result)
)
self._callbacks.on_trial_complete(
iteration=self._iteration, trials=self._trials, trial=trial
)
self._schedule_graceful_trial_stop(trial)
self._live_trials.discard(trial)
except Exception as e:
logger.exception("Trial %s: Error stopping trial.", trial)
if self._fail_fast == self.RAISE:
raise
if isinstance(e, TuneError):
self._process_trial_failure(trial, exception=e)
else:
self._process_trial_failure(
trial, _TuneStopTrialError(traceback.format_exc())
)
def _schedule_graceful_trial_stop(self, trial: Trial):
self._schedule_trial_export(trial)
if trial.status != "ERROR":
self._schedule_trial_stop(trial)
    def _schedule_trial_pause(self, trial: Trial, should_checkpoint: bool = True):
        """Pause a trial, optionally saving a checkpoint first.

        Args:
            trial: Trial to pause.
            should_checkpoint: If True, schedule a save and pause once the
                save resolved; if False, stop the actor immediately.
        """
        if trial not in self._trial_to_actor:
            logger.debug(
                f"Trial PAUSE requested for trial {trial} but trial is already "
                f"stopping. Ignoring."
            )
            return
        if should_checkpoint:
            # The PAUSE is executed after the save resolves (the cached
            # decision is picked up once the save result is processed).
            self._cached_trial_decisions[trial.trial_id] = TrialScheduler.PAUSE
            self._schedule_trial_save(trial=trial)
        else:
            self._schedule_trial_stop(trial)
            self._set_trial_status(trial, Trial.PAUSED)
###
# TRAIN
    def _schedule_trial_train(self, trial: Trial):
        """Schedule the next training step (possibly buffered) for a trial."""
        args = ()
        method_name = "train"
        buffer_length, buffer_time_s = self._maybe_buffer_training(trial)
        if buffer_length > 1:
            # Run several training iterations per remote call to cut overhead.
            method_name = "train_buffered"
            args = (buffer_length, buffer_time_s)
        logger.debug(f"Scheduling future {method_name.upper()} for trial {trial}")
        self._schedule_trial_task(
            trial=trial,
            method_name=method_name,
            args=args,
            on_result=self._on_training_result,
            on_error=self._trial_task_failure,
        )
def _maybe_buffer_training(self, trial: Trial) -> Tuple[int, float]:
buffer_time_s = max(
self._buffer_min_time_s,
min(self._buffer_max_time_s, self._actor_manager.num_actor_tasks // 10),
)
buffer_length = self._buffer_length
if buffer_length > 1 and trial.checkpoint_at_end:
# If a trial checkpoint can be triggered externally,
# it is not safe to buffer results.
if log_once("trial_executor_buffer_checkpoint"):
logger.warning(
"Disabling buffered training as you passed "
"`checkpoint_at_end` to `tune.CheckpointConfig()`."
)
return 1, buffer_time_s
if buffer_length > 1 and trial.checkpoint_freq > 0:
return min(buffer_length, trial.checkpoint_freq), buffer_time_s
return buffer_length, buffer_time_s
###
# RESULT
def _on_training_result(self, trial, result):
if not isinstance(result, list):
result = [result]
with warn_if_slow("process_trial_result"):
self._process_trial_results(trial, result)
self._maybe_execute_queued_decision(trial, after_save=False)
    def _process_trial_results(self, trial, results):
        """Process a batch of (possibly buffered) training results in order."""
        logger.debug(f"Processing trial results for trial {trial}: {results}")
        with warn_if_slow(
            "process_trial_results",
            message="Processing trial results took {duration:.3f} s, "
            "which may be a performance bottleneck. Please consider "
            "reporting results less frequently to Ray Tune.",
        ):
            for i, result in enumerate(results):
                with warn_if_slow("process_trial_result"):
                    decision = self._process_trial_result(trial, result)
                if decision is None:
                    # If we didn't get a decision, this means a
                    # non-training future (e.g. a save) was scheduled.
                    # We do not allow processing more results then.
                    if i < len(results) - 1:
                        if log_once("tune_controller_buffer_checkpoint"):
                            logger.warning(
                                f"Trial {trial} has a non-training future "
                                f"scheduled but {len(results) - i} results "
                                f"left to process. This means that a "
                                f"checkpoint was requested, but buffered "
                                f"training was continued before it was "
                                f"saved. Consider using non-buffered "
                                f"training by setting the env variable "
                                f"`TUNE_RESULT_BUFFER_LENGTH=1`."
                            )
                elif decision == TrialScheduler.STOP:
                    # If the decision is to stop the trial,
                    # ignore all results that came after that.
                    break
    def _process_trial_result(self, trial: Trial, result: dict[str, Any]):
        """Process a single training result for a trial.

        Consults the stopper, the scheduler and the searcher, notifies
        callbacks, and triggers checkpointing as needed.

        Returns:
            The scheduling decision, or None if the decision was cached
            because a save is currently in flight for the trial.
        """
        result.update(trial_id=trial.trial_id)
        is_duplicate = RESULT_DUPLICATE in result
        force_checkpoint = False
        # TrialScheduler and SearchAlgorithm still receive a
        # notification because there may be special handling for
        # the `on_trial_complete` hook.
        if is_duplicate:
            logger.debug("Trial finished without logging 'done'.")
            result = trial.last_result
            result.update(done=True)
        self._total_time += result.get(TIME_THIS_ITER_S, 0)
        flat_result = flatten_dict(result)
        self._validate_result_metrics(flat_result)
        if self._stopper(trial.trial_id, result) or trial.should_stop(flat_result):
            decision = TrialScheduler.STOP
        else:
            with warn_if_slow("scheduler.on_trial_result"):
                decision = self._scheduler_alg.on_trial_result(
                    self._wrapped(), trial, flat_result
                )
        if decision == TrialScheduler.STOP:
            result.update(done=True)
        else:
            # Only updating search alg if the trial is not to be stopped.
            with warn_if_slow("search_alg.on_trial_result"):
                self._search_alg.on_trial_result(trial.trial_id, flat_result)
        # If this is not a duplicate result, the callbacks should
        # be informed about the result.
        if not is_duplicate:
            with warn_if_slow("callbacks.on_trial_result"):
                self._callbacks.on_trial_result(
                    iteration=self._iteration,
                    trials=self._trials,
                    trial=trial,
                    # NOTE: Allow user callbacks to modify the Trial result in place.
                    result=result,
                )
            force_checkpoint = result.get(SHOULD_CHECKPOINT, False)
        trial.update_last_result(result)
        # Include in next experiment checkpoint
        self._mark_trial_to_checkpoint(trial)
        # Checkpoints to disk. This should be checked even if
        # the scheduler decision is STOP or PAUSE. Note that
        # PAUSE only checkpoints to memory and does not update
        # the global checkpoint state.
        if decision != TrialScheduler.PAUSE:
            # TODO(justinvyu): This is a temporary hack to fix pausing trials.
            # We already schedule a save task in `pause_trial`, so no need
            # to do it again here.
            self._checkpoint_trial_if_needed(trial, force=force_checkpoint)
        if trial.is_saving:
            logger.debug(f"Caching trial decision for trial {trial}: {decision}")
            # Cache decision to execute on after the save is processed.
            # This prevents changing the trial's state or kicking off
            # another training step prematurely.
            if not self._cached_trial_decisions.get(trial.trial_id) or decision in {
                TrialScheduler.PAUSE,
                TrialScheduler.STOP,
            }:
                # If already set, only overwrite if it's a PAUSE or STOP. This is
                # to avoid that CONTINUE decisions from a training step that resolve
                # late overwrite PAUSE/STOP decision.
                self._cached_trial_decisions[trial.trial_id] = decision
            return None
        else:
            self._queue_decision(trial, decision)
            return decision
    def _validate_result_metrics(self, result):
        """
        Check if any of the required metrics was not reported
        in the last result. If the only items are ``done`` or any of
        DEBUG_METRICS, this means that no result was ever received and
        the trial just returned. This is also okay and will not raise
        an error.

        This will ignore checking for the DEFAULT_METRIC.

        Raises:
            ValueError: If a required metric is missing from the result.
        """
        # Only enforce when strict checking is on AND more than just
        # `done`/debug metrics were reported.
        if int(os.environ.get("TUNE_DISABLE_STRICT_METRIC_CHECKING", 0)) != 1 and (
            len({k for k in result if k not in list(DEBUG_METRICS) + [DONE]}) > 1
        ):
            base_metric = self._metric if self._metric != DEFAULT_METRIC else None
            scheduler_metric = (
                self._scheduler_alg.metric
                if self._scheduler_alg.metric != DEFAULT_METRIC
                else None
            )
            search_metrics = (
                self._search_alg.metric
                if self._search_alg.metric != DEFAULT_METRIC
                else None
            )
            if isinstance(search_metrics, str):
                search_metrics = [search_metrics]
            # Find the first missing metric, remembering where it was required
            # so the error message can point at the responsible component.
            if base_metric and base_metric not in result:
                report_metric = base_metric
                location = "tune.TuneConfig()"
            elif scheduler_metric and scheduler_metric not in result:
                report_metric = scheduler_metric
                location = type(self._scheduler_alg).__name__
            elif search_metrics and any(
                search_metric not in result for search_metric in search_metrics
            ):
                report_metric = list(
                    filter(
                        lambda search_metric: search_metric not in result,
                        search_metrics,
                    )
                )
                if len(report_metric) == 1:
                    report_metric = report_metric[0]
                location = type(self._search_alg).__name__
            else:
                report_metric = None
                location = None
            if report_metric:
                raise ValueError(
                    "Trial returned a result which did not include the "
                    "specified metric(s) `{}` that `{}` expects. "
                    "Make sure your calls to `tune.report()` include the "
                    "metric, or set the "
                    "TUNE_DISABLE_STRICT_METRIC_CHECKING "
                    "environment variable to 1. Result: {}".format(
                        report_metric, location, result
                    )
                )
###
# SAVE
    def _schedule_trial_save(
        self,
        trial: Trial,
        result: Optional[Dict] = None,
    ) -> Optional[_FutureTrainingResult]:
        """Schedule a ``save`` task on the trial's actor.

        Args:
            trial: Trial to save.
            result: Result to associate with the checkpoint; defaults to
                the trial's last result.

        Returns:
            A future wrapping the eventual training result, or None if the
            trial has no live actor.
        """
        if trial not in self._trial_to_actor:
            logger.debug(
                f"Trial SAVE requested for trial {trial} but trial is already "
                f"stopping. Ignoring."
            )
            return None
        result = result or trial.last_result
        future = self._schedule_trial_task(
            trial=trial,
            method_name="save",
            on_result=self._on_saving_result,
            on_error=self._trial_task_failure,
            _return_future=True,
        )
        # TODO(justinvyu): `trial.saving_to` (and trial.is_saving) is needed
        # in order to prevent a done=True result from executing a STOP decision
        # (which clears all futures) before the save gets processed.
        # Keep this in for now while `train` and `save` are 2 separate steps.
        trial.temporary_state.saving_to = _FutureTrainingResult(future)
        # `trial.saving_to` holds a future training result -- this is only used
        # in the case of PBT to block until the checkpoint is ready.
        # In all other situations, the checkpoint future is processed by the
        # actor event manager when it is ready.
        return trial.temporary_state.saving_to
def _on_saving_result(self, trial, checkpoint_value: _TrainingResult):
with warn_if_slow("process_trial_save"):
self._process_trial_save(trial, checkpoint_value)
with warn_if_slow("callbacks.on_trial_save"):
self._callbacks.on_trial_save(
iteration=self._iteration, trials=self._trials, trial=trial
)
self._maybe_execute_queued_decision(trial, after_save=True)
    def _process_trial_save(self, trial: Trial, checkpoint_value: _TrainingResult):
        """Processes a trial save.

        Acts on the decision cached during the last `_process_trial` call.

        Args:
            trial: Trial being saved.
            checkpoint_value: Training result carrying the saved checkpoint.
        """
        logger.debug("Trial %s: Processing trial save.", trial)
        try:
            if not checkpoint_value.checkpoint:
                logger.debug(f"Got empty checkpoint for trial {trial}")
            else:
                try:
                    self._callbacks.on_checkpoint(
                        iteration=self._iteration,
                        trials=self._trials,
                        trial=trial,
                        checkpoint=checkpoint_value.checkpoint,
                    )
                except Exception:
                    logger.warning(
                        "Error encountered during processing of callbacks. "
                        "Ray Train/Tune recently changed the checkpoint interface "
                        "that is passed to callbacks. If you implemented your own "
                        "callback with an `on_checkpoint` handler, please review "
                        "the checkpoint interface and adjust your code "
                        "accordingly."
                    )
                    raise
                trial.on_checkpoint(checkpoint_value)
                self._checkpoint_manager.on_trial_checkpoint(trial)
                self._mark_trial_to_checkpoint(trial)
        except Exception:
            # Deliberate best-effort: a failed save must not kill the trial.
            logger.exception(
                "Trial %s: Error handling checkpoint %s", trial, checkpoint_value
            )
        trial.temporary_state.saving_to = None
        # Execute the decision that was cached while the save was in flight.
        decision = self._cached_trial_decisions.pop(trial.trial_id, None)
        if decision and checkpoint_value:
            self._queue_decision(trial, decision)
def _checkpoint_trial_if_needed(self, trial, force=False):
"""Checkpoints trial based off trial.last_result."""
if trial.should_checkpoint() or force:
# Save trial runtime if possible.
if trial.temporary_state.ray_actor:
self._schedule_trial_save(trial)
###
# RESTORE
    def _schedule_trial_restore(self, trial: Trial) -> bool:
        """Schedule restoration of the trial from its latest checkpoint.

        Returns:
            True if a restore task was scheduled, False if there is no
            checkpoint to restore from.
        """
        checkpoint_result = trial.latest_checkpoint_result
        if not checkpoint_result:
            logger.debug(f"Not restoring trial {trial}: No checkpoint found.")
            return False
        # TODO(justinvyu): Is this really needed?
        trial.temporary_state.restoring_from = checkpoint_result
        method_name = "restore"
        args = (checkpoint_result,)
        self._schedule_trial_task(
            trial=trial,
            method_name=method_name,
            args=args,
            kwargs={},
            on_result=self._on_restoring_result,
            on_error=self._trial_task_failure,
        )
        return True
def _on_restoring_result(self, trial: Trial, result: Any):
self._process_trial_restore(trial)
    def _process_trial_restore(self, trial: Trial):
        """Processes a trial restore.

        Args:
            trial: Trial being restored.
        """
        logger.debug("Trial %s: Processing trial restore.", trial)
        trial.on_restore()
        logger.debug("Trial %s: Restore processed successfully", trial)
        self._set_trial_status(trial, Trial.RUNNING)
        # Resume training now that the trial state has been restored.
        self._schedule_trial_train(trial)
        self._live_trials.add(trial)
    def _try_recover(
        self, trial: Trial, exc: Union[TuneError, RayTaskError, RayActorError]
    ):
        """Tries to recover trial.

        Notifies SearchAlgorithm and Scheduler if failure to recover.

        Args:
            trial: Trial to recover.
            exc: Exception prior to invoking this method.
        """
        # Stale decisions must not fire against the recovered trial.
        self._cached_trial_decisions.pop(trial.trial_id, None)
        # Resetting this, in case that the trial is in saving status when it crashes.
        if trial.is_saving:
            trial.temporary_state.saving_to = None
        self._schedule_trial_stop(trial, exception=exc)
        logger.debug("Trial %s: Notifying Scheduler and requeueing.", trial)
        self._requeue_trial(trial)
    def _requeue_trial(self, trial):
        """Notification to TrialScheduler and requeue trial.

        This does not notify the SearchAlgorithm because the function
        evaluation is still in progress.
        """
        self._scheduler_alg.on_trial_error(self, trial)
        self._set_trial_status(trial, status=Trial.PENDING)
        # TODO(rliaw): Right now, this pushes the trial to the end of queue
        # because restoration can be expensive. However, this is not
        # ideal since it just hides the issue - a better fix would
        # be to use an actor table to detect the IP of the Trainable
        # and rsync the files there.
        # See https://github.com/ray-project/ray/issues/5168
        self._trials.pop(self._trials.index(trial))
        self._trials.append(trial)
        self._live_trials.add(trial)
        # Re-adding lets the scheduler treat it like a fresh PENDING trial.
        with warn_if_slow("scheduler.on_trial_add"):
            self._scheduler_alg.on_trial_add(self._wrapped(), trial)
###
# EXPORT
def _schedule_trial_export(self, trial: Trial):
if not trial.export_formats or len(trial.export_formats) <= 0:
return
# Todo: We are waiting here synchronously until the task resolved.
# Instead, we should schedule the trial stop after the export resolved.
# This requires changes in TrialRunner, which we can remove once the
# legacy execution path has been removed.
future = self._schedule_trial_task(
trial=trial,
method_name="export_model",
args=(trial.export_formats,),
on_result=None,
on_error=self._trial_task_failure,
_return_future=True,
)
self._actor_manager._actor_task_events.resolve_future(future)
###
# RESET
    def _schedule_trial_reset(
        self,
        trial: Trial,
        new_config: Dict,
        new_experiment_tag: str,
    ):
        """Reset a (reused) actor to run ``trial`` with a new config.

        Args:
            trial: Trial taking over the actor.
            new_config: Configuration the trainable should be reset to.
            new_experiment_tag: New experiment tag for the trial.
        """
        trial.set_experiment_tag(new_experiment_tag)
        trial.set_config(new_config)
        # Pass magic variables
        extra_config = copy.deepcopy(new_config)
        extra_config[TRIAL_INFO] = _TrialInfo(trial)
        stdout_file, stderr_file = trial.log_to_file
        extra_config[STDOUT_FILE] = stdout_file
        extra_config[STDERR_FILE] = stderr_file
        logger_creator = partial(
            _noop_logger_creator, logdir=trial.storage.trial_working_directory
        )
        # Track the in-flight reset so actor-reuse logic can short-circuit.
        self._resetting_trials.add(trial)
        self._schedule_trial_task(
            trial=trial,
            method_name="reset",
            args=(extra_config,),
            kwargs={
                "logger_creator": logger_creator,
                "storage": trial.storage,
            },
            on_result=self._on_trial_reset,
            on_error=self._trial_task_failure,
        )
    def _on_trial_reset(self, trial: Trial, success: bool):
        """Callback for resolved ``reset`` futures on reused actors.

        On success the actor is treated as freshly started; on failure the
        trial is aborted (reuse requires ``reset_config()`` returning True).
        """
        self._resetting_trials.remove(trial)
        if not success:
            info = (
                "Trainable runner reuse requires reset_config() to be "
                "implemented and return True."
            )
            logger.error(f"Could not re-use actor for trial {trial}: {info}")
            exception = _AbortTrialExecution(info)
            trial.handle_error(exception)
            self._schedule_trial_stop(trial, exception=exception)
            return
        tracked_actor = self._trial_to_actor[trial]
        self._actor_started(tracked_actor, log="REUSED")
def request_stop_trial(self, trial):
self._stop_queue.append(trial)
def request_stop_experiment(self):
self._should_stop_experiment = True
def _process_stop_requests(self):
while self._stop_queue:
t = self._stop_queue.pop()
self.stop_trial(t)
def pause_trial(self, trial: Trial, should_checkpoint: bool = True):
"""Pause a trial and reset the necessary state variables for resuming later.
Args:
trial: Trial to pause.
should_checkpoint: Whether or not an in-memory checkpoint should be created
for this paused trial. Defaults to True.
"""
# NOTE: The cached trial decision is not needed since we will overrule this
# decision with PAUSE.
self._cached_trial_decisions.pop(trial.trial_id, None)
self._schedule_trial_pause(trial, should_checkpoint=should_checkpoint)
def cleanup(self):
"""Cleanup trials and callbacks."""
self._cleanup_trials()
self.end_experiment_callbacks()
def __getstate__(self):
"""Gets state for trial.
Note that this is not used as a pickling override as
does not have all fields.
"""
state = self.__dict__.copy()
for k in [
"_trials",
"_live_trials",
"_stop_queue",
"_search_alg",
"_placeholder_resolvers",
"_scheduler_alg",
"_pending_trial_queue_times",
"_callbacks",
"_checkpoint_manager",
"_storage",
"_insufficient_resources_manager",
"_actor_manager",
"_class_cache",
"_resource_updater",
"_trials_to_cache",
"_trial_metadata",
"_actor_to_trial",
"_trial_to_actor",
"_resources_to_pending_trials",
"_pending_trials",
"_pending_trials_list",
"_running_trials",
"_paused_trials",
"_stopped_trials",
"_failed_trials",
"_resetting_trials",
"_started_actors",
"_stopping_actors",
"_staged_trials",
"_actor_cache",
]:
del state[k]
return state
def __setstate__(self, state):
# Use session_str from previous checkpoint if does not exist
session_str = state.pop("_session_str")
self.__dict__.setdefault("_session_str", session_str)
# Use start_time from previous checkpoint if does not exist
start_time = state.pop("_start_time")
self.__dict__.setdefault("_start_time", start_time)
self.__dict__.update(state)
self._checkpoint_manager = self._create_checkpoint_manager()
| TuneController |
python | buildout__buildout | src/zc/buildout/configparser.py | {
"start": 2254,
"end": 11266
} | class ____(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
# This regex captures either sections headers with optional trailing comment
# separated by a semicolon or a hash. Section headers can have an optional
# expression. Expressions and comments can contain brackets but no verbatim '#'
# and ';' : these need to be escaped.
# A title line with an expression has the general form:
# [section_name: some Python expression] #; some comment
# This regex leverages the fact that the following is a valid Python expression:
# [some Python expression] # some comment
# and that section headers are also delimited by [brackets] that are also [list]
# delimiters.
# So instead of doing complex parsing to balance brackets in an expression, we
# capture just enough from a header line to collect then remove the section_name
# and colon expression separator keeping only a list-enclosed expression and
# optional comments. The parsing and validation of this Python expression can be
# entirely delegated to Python's eval. The result of the evaluated expression is
# the always returned wrapped in a list with a single item that contains the
# original expression
section_header = re.compile(
r'(?P<head>\[)'
r'\s*'
r'(?P<name>[^\s#[\]:;{}]+)'
r'\s*'
r'(:(?P<expression>[^#;]*))?'
r'\s*'
r'(?P<tail>]'
r'\s*'
r'([#;].*)?$)'
).match
option_start = re.compile(
r'(?P<name>[^\s{}[\]=:]+\s*[-+]?)'
r'='
r'(?P<value>.*)$').match
leading_blank_lines = re.compile(r"^(\s*\n)+")
def parse(fp, fpname, exp_globals=dict):
"""Parse a sectioned setup file.
The sections in setup files contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
The title line is in the form [name] followed by an optional trailing
comment separated by a semicolon `;' or a hash `#' character.
Optionally the title line can have the form `[name:expression]' where
expression is an arbitrary Python expression. Sections with an expression
that evaluates to False are ignored. Semicolon `;' an hash `#' characters
must be string-escaped in expression literals.
exp_globals is a callable returning a mapping of defaults used as globals
during the evaluation of a section conditional expression.
"""
sections = {}
# the current section condition, possibly updated from a section expression
section_condition = True
context = None
cursect = None # None, or a dictionary
blockmode = None
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break # EOF
lineno = lineno + 1
if line[0] in '#;':
continue # comment
if line[0].isspace() and cursect is not None and optname:
if not section_condition:
#skip section based on its expression condition
continue
# continuation line
if blockmode:
line = line.rstrip()
else:
line = line.strip()
if not line:
continue
cursect[optname] = "%s\n%s" % (cursect[optname], line)
else:
header = section_header(line)
if header:
# reset to True when starting a new section
section_condition = True
sectname = header.group('name')
head = header.group('head') # the starting [
expression = header.group('expression')
tail = header.group('tail') # closing ]and comment
if expression:
# normalize tail comments to Python style
tail = tail.replace(';', '#') if tail else ''
# un-escape literal # and ; . Do not use a
# string-escape decode
expr = expression.replace(r'\x23','#').replace(r'\x3b', ';')
try:
# new-style markers as used in pip constraints, e.g.:
# 'python_version < "3.11" and platform_system == "Windows"'
marker = Marker(expr)
section_condition = marker.evaluate()
except InvalidMarker:
# old style buildout expression
# rebuild a valid Python expression wrapped in a list
expr = head + expr + tail
# lazily populate context only expression
if not context:
context = exp_globals()
# evaluated expression is in list: get first element
section_condition = eval(expr, context)[0]
# finally, ignore section when an expression
# evaluates to false
if not section_condition:
logger.debug(
'Ignoring section %(sectname)r with [expression]:'
' %(expression)r' % locals())
continue
if sectname in sections:
cursect = sections[sectname]
else:
sections[sectname] = cursect = {}
# So sections can't start with a continuation line
optname = None
elif cursect is None:
if not line.strip():
continue
# no section header in the file?
raise MissingSectionHeaderError(fpname, lineno, line)
else:
if line[:2] == '=>':
line = '<part-dependencies> = ' + line[2:]
mo = option_start(line)
if mo:
if not section_condition:
# filter out options of conditionally ignored section
continue
# option start line
optname, optval = mo.group('name', 'value')
optname = optname.rstrip()
optval = optval.strip()
# Handle multiple extensions of the same value in the
# same file. This happens with conditional sections.
opt_op = optname[-1]
if opt_op not in '+-':
opt_op = '='
if optname in cursect and opt_op in '+-':
# Strip any trailing \n, which happens when we have multiple
# +=/-= in one file
cursect[optname] = cursect[optname].rstrip()
if optval:
cursect[optname] = "%s\n%s" % (cursect[optname], optval)
else:
# If an assignment (=) comes after an extend (+=) /
# remove (-=), it overrides and replaces the preceding
# extend / remove
if opt_op == '=':
for suffix in '+-':
tempname = "%s %s" % (optname, suffix)
if tempname in cursect:
del cursect[tempname]
cursect[optname] = optval
blockmode = not optval
elif not (optname or line.strip()):
# blank line after section start
continue
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
for sectname in sections:
section = sections[sectname]
for name in section:
value = section[name]
if value[:1].isspace():
section[name] = leading_blank_lines.sub(
'', textwrap.dedent(value.rstrip()))
return sections
| MissingSectionHeaderError |
python | rapidsai__cudf | python/cudf/cudf/core/column/lists.py | {
"start": 1078,
"end": 19351
} | class ____(ColumnBase):
_VALID_BINARY_OPERATIONS = {"__add__", "__radd__", "__eq__", "__ne__"}
_VALID_PLC_TYPES = {plc.TypeId.LIST}
def __init__(
self,
plc_column: plc.Column,
size: int,
dtype: ListDtype,
offset: int,
null_count: int,
exposed: bool,
) -> None:
if (
not cudf.get_option("mode.pandas_compatible")
and not isinstance(dtype, ListDtype)
) or (
cudf.get_option("mode.pandas_compatible")
and not is_dtype_obj_list(dtype)
):
raise ValueError("dtype must be a cudf.ListDtype")
super().__init__(
plc_column=plc_column,
size=size,
dtype=dtype,
offset=offset,
null_count=null_count,
exposed=exposed,
)
def _get_children_from_pylibcudf_column(
self,
plc_column: plc.Column,
dtype: ListDtype, # type: ignore[override]
exposed: bool,
) -> tuple[ColumnBase, ColumnBase]:
children = super()._get_children_from_pylibcudf_column(
plc_column, dtype, exposed
)
return (
children[0],
children[1]._with_type_metadata(dtype.element_type),
)
def _prep_pandas_compat_repr(self) -> StringColumn | Self:
"""
Preprocess Column to be compatible with pandas repr, namely handling nulls.
* null (datetime/timedelta) = str(pd.NaT)
* null (other types)= str(pd.NA)
"""
# TODO: handle if self.has_nulls(): case
return self
@cached_property
def memory_usage(self) -> int:
n = super().memory_usage
child0_size = (self.size + 1) * self.base_children[0].dtype.itemsize
current_base_child = self.base_children[1]
current_offset = self.offset
n += child0_size
while type(current_base_child) is ListColumn:
child0_size = (
current_base_child.size + 1 - current_offset
) * current_base_child.base_children[0].dtype.itemsize
n += child0_size
current_offset_col = current_base_child.base_children[0]
if not len(current_offset_col):
# See https://github.com/rapidsai/cudf/issues/16164 why
# offset column can be uninitialized
break
current_offset = current_offset_col.element_indexing(
current_offset
)
current_base_child = current_base_child.base_children[1]
n += (
current_base_child.size - current_offset
) * current_base_child.dtype.itemsize
if current_base_child.nullable:
n += plc.null_mask.bitmask_allocation_size_bytes(
current_base_child.size
)
return n
def element_indexing(self, index: int) -> list:
result = super().element_indexing(index)
if isinstance(result, pa.Scalar):
py_element = maybe_nested_pa_scalar_to_py(result)
return self.dtype._recursively_replace_fields(py_element) # type: ignore[union-attr]
return result
def _cast_setitem_value(self, value: Any) -> plc.Scalar:
if isinstance(value, list) or value is None:
return pa_scalar_to_plc_scalar(
pa.scalar(value, type=self.dtype.to_arrow()) # type: ignore[union-attr]
)
elif value is NA or value is None:
return pa_scalar_to_plc_scalar(
pa.scalar(None, type=self.dtype.to_arrow()) # type: ignore[union-attr]
)
else:
raise ValueError(f"Can not set {value} into ListColumn")
@property
def base_size(self) -> int:
# in some cases, libcudf will return an empty ListColumn with no
# indices; in these cases, we must manually set the base_size to 0 to
# avoid it being negative
return max(0, len(self.base_children[0]) - 1)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
# Lists only support __add__, which concatenates lists.
reflect, op = self._check_reflected_op(op)
if not isinstance(other, type(self)):
return NotImplemented
if isinstance(other.dtype, ListDtype):
if op == "__add__":
return self.concatenate_rows([other])
else:
raise NotImplementedError(
"Lists concatenation for this operation is not yet"
"supported"
)
else:
raise TypeError("can only concatenate list to list")
@property
def elements(self) -> ColumnBase:
"""
Column containing the elements of each list (may itself be a
ListColumn)
"""
return self.children[1]
@property
def offsets(self) -> NumericalColumn:
"""
Integer offsets to elements specifying each row of the ListColumn
"""
return cast(NumericalColumn, self.children[0])
def to_arrow(self) -> pa.Array:
offsets = self.offsets.to_arrow()
elements = (
pa.nulls(len(self.elements))
if len(self.elements) == self.elements.null_count
else self.elements.to_arrow()
)
pa_type = pa.list_(elements.type)
if self.nullable:
nbuf = pa.py_buffer(self.mask.memoryview()) # type: ignore[union-attr]
buffers = [nbuf, offsets.buffers()[1]]
else:
buffers = list(offsets.buffers())
return pa.ListArray.from_buffers(
pa_type,
len(self),
# PyArrow stubs are too strict - from_buffers should accept None for missing buffers
buffers, # type: ignore[arg-type]
children=[elements],
)
@property
def __cuda_array_interface__(self) -> Mapping[str, Any]:
raise NotImplementedError(
"Lists are not yet supported via `__cuda_array_interface__`"
)
def _with_type_metadata(self: Self, dtype: DtypeObj) -> Self:
if isinstance(dtype, ListDtype):
elements = self.base_children[1]._with_type_metadata(
dtype.element_type
)
new_children = [
self.plc_column.children()[0],
elements.plc_column,
]
new_plc_column = plc.Column(
plc.DataType(plc.TypeId.LIST),
self.plc_column.size(),
self.plc_column.data(),
self.plc_column.null_mask(),
self.plc_column.null_count(),
self.plc_column.offset(),
new_children,
)
return type(self)(
plc_column=new_plc_column,
size=self.size,
dtype=dtype,
offset=self.offset,
null_count=self.null_count,
exposed=False,
)
# For pandas dtypes, store them directly in the column's dtype property
elif isinstance(dtype, pd.ArrowDtype) and isinstance(
dtype.pyarrow_dtype, pa.ListType
):
self._dtype = dtype
return self
def copy(self, deep: bool = True) -> Self:
# Since list columns are immutable, both deep and shallow copies share
# the underlying device data and mask.
return super().copy(deep=False)
def leaves(self) -> ColumnBase:
if isinstance(self.elements, ListColumn):
return self.elements.leaves()
else:
return self.elements
@classmethod
def from_sequences(cls, arbitrary: Sequence[ColumnLike]) -> Self:
"""
Create a list column for list of column-like sequences
"""
data_col = column_empty(0)
mask_bools = []
offset_vals = [0]
offset = 0
# Build Data, Mask & Offsets
for data in arbitrary:
if _is_null_host_scalar(data):
mask_bools.append(False)
offset_vals.append(offset)
else:
mask_bools.append(True)
data_col = data_col.append(as_column(data))
offset += len(data)
offset_vals.append(offset)
offset_col = plc.Column.from_iterable_of_py(
offset_vals, dtype=plc.types.SIZE_TYPE
)
data_plc_col = data_col.plc_column
mask, null_count = plc.transform.bools_to_mask(
plc.Column.from_iterable_of_py(mask_bools)
)
plc_column = plc.Column(
plc.DataType(plc.TypeId.LIST),
len(offset_vals) - 1,
None,
mask,
null_count,
0,
[offset_col, data_plc_col],
)
return cls.from_pylibcudf(plc_column)
@cached_property
def _string_separators(self) -> plc.Column:
# Separator strings to match the Python format
return plc.Column.from_iterable_of_py(
[", ", "[", "]"], dtype=plc.DataType(plc.TypeId.STRING)
)
def as_string_column(self, dtype: DtypeObj) -> StringColumn:
"""
Create a strings column from a list column
"""
if cudf.get_option("mode.pandas_compatible"):
if isinstance(dtype, np.dtype) and dtype.kind == "O":
raise TypeError(
f"Cannot cast a list from {self.dtype} to {dtype}"
)
lc = self._transform_leaves(
lambda col, dtype: col.as_string_column(dtype), dtype
)
with acquire_spill_lock():
plc_column = plc.strings.convert.convert_lists.format_list_column(
lc.plc_column,
pa_scalar_to_plc_scalar(pa.scalar("None")),
self._string_separators,
)
return type(self).from_pylibcudf(plc_column) # type: ignore[return-value]
def _transform_leaves(
self, func: Callable[[ColumnBase, DtypeObj], ColumnBase], *args: Any
) -> Self:
"""
Return a new column like Self but with func applied to the last leaf column.
"""
leaf_queue: list[ListColumn] = []
curr_col: ColumnBase = self
while isinstance(curr_col, ListColumn):
leaf_queue.append(curr_col)
curr_col = curr_col.children[1]
plc_leaf_col = func(curr_col, *args).plc_column
# Rebuild the list column replacing just the leaf child
while leaf_queue:
col = leaf_queue.pop()
offsets = col.children[0].plc_column
plc_leaf_col = plc.Column(
plc.DataType(plc.TypeId.LIST),
col.size,
None,
plc.gpumemoryview(col.mask) if col.mask is not None else None,
col.null_count,
col.offset,
[offsets, plc_leaf_col],
)
return type(self).from_pylibcudf(plc_leaf_col)
@property
def element_type(self) -> DtypeObj:
"""
Returns the element type of the list column.
"""
if isinstance(self.dtype, ListDtype):
return self.dtype.element_type
else:
return get_dtype_of_same_kind(
self.dtype,
self.dtype.pyarrow_dtype.value_type.to_pandas_dtype(), # type: ignore[union-attr]
)
def to_pandas(
self,
*,
nullable: bool = False,
arrow_type: bool = False,
) -> pd.Index:
if arrow_type or (
cudf.get_option("mode.pandas_compatible")
and isinstance(self.dtype, pd.ArrowDtype)
):
return super().to_pandas(nullable=nullable, arrow_type=arrow_type)
elif nullable:
raise NotImplementedError(f"{nullable=} is not implemented.")
else:
return pd.Index(self.to_arrow().tolist(), dtype="object")
@acquire_spill_lock()
def count_elements(self) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.count_elements(self.plc_column)
)
@acquire_spill_lock()
def distinct(self, nulls_equal: bool, nans_all_equal: bool) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.distinct(
self.plc_column,
(
plc.types.NullEquality.EQUAL
if nulls_equal
else plc.types.NullEquality.UNEQUAL
),
(
plc.types.NanEquality.ALL_EQUAL
if nans_all_equal
else plc.types.NanEquality.UNEQUAL
),
)
)
@acquire_spill_lock()
def sort_lists(
self, ascending: bool, na_position: Literal["first", "last"]
) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.sort_lists(
self.plc_column,
plc.types.Order.ASCENDING
if ascending
else plc.types.Order.DESCENDING,
(
plc.types.NullOrder.BEFORE
if na_position == "first"
else plc.types.NullOrder.AFTER
),
False,
)
)
@acquire_spill_lock()
def extract_element_scalar(self, index: int) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.extract_list_element(
self.plc_column,
index,
)
)
@acquire_spill_lock()
def extract_element_column(self, index: ColumnBase) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.extract_list_element(
self.plc_column,
index.plc_column,
)
)
@acquire_spill_lock()
def contains_scalar(self, search_key: pa.Scalar) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.contains(
self.plc_column,
pa_scalar_to_plc_scalar(search_key),
)
)
@acquire_spill_lock()
def index_of_scalar(self, search_key: pa.Scalar) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.index_of(
self.plc_column,
pa_scalar_to_plc_scalar(search_key),
plc.lists.DuplicateFindOption.FIND_FIRST,
)
)
@acquire_spill_lock()
def index_of_column(self, search_keys: ColumnBase) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.index_of(
self.plc_column,
search_keys.plc_column,
plc.lists.DuplicateFindOption.FIND_FIRST,
)
)
@acquire_spill_lock()
def concatenate_rows(self, other_columns: list[ColumnBase]) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.concatenate_rows(
plc.Table(
[
col.plc_column
for col in itertools.chain([self], other_columns)
]
)
)
)
@acquire_spill_lock()
def concatenate_list_elements(self, dropna: bool) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.concatenate_list_elements(
self.plc_column,
plc.lists.ConcatenateNullPolicy.IGNORE
if dropna
else plc.lists.ConcatenateNullPolicy.NULLIFY_OUTPUT_ROW,
)
)
@acquire_spill_lock()
def segmented_gather(self, gather_map: ColumnBase) -> ColumnBase:
return type(self).from_pylibcudf(
plc.lists.segmented_gather(
self.plc_column,
gather_map.plc_column,
)
)
@acquire_spill_lock()
def join_list_elements(
self,
separator: str | StringColumn,
sep_na_rep: str,
string_na_rep: str,
) -> StringColumn:
if isinstance(separator, str):
sep: plc.Scalar | plc.Column = pa_scalar_to_plc_scalar(
pa.scalar(separator)
)
else:
sep = separator.plc_column
plc_column = plc.strings.combine.join_list_elements(
self.plc_column,
sep,
pa_scalar_to_plc_scalar(pa.scalar(sep_na_rep)),
pa_scalar_to_plc_scalar(pa.scalar(string_na_rep)),
plc.strings.combine.SeparatorOnNulls.YES,
plc.strings.combine.OutputIfEmptyList.NULL_ELEMENT,
)
return type(self).from_pylibcudf(plc_column) # type: ignore[return-value]
@acquire_spill_lock()
def minhash_ngrams(
self,
width: int,
seed: int | np.uint32,
a: NumericalColumn,
b: NumericalColumn,
) -> Self:
# Convert int to np.uint32 with validation
if isinstance(seed, int):
if seed < 0 or seed > np.iinfo(np.uint32).max:
raise ValueError(
f"seed must be in range [0, {np.iinfo(np.uint32).max}]"
)
seed = np.uint32(seed)
return type(self).from_pylibcudf(
plc.nvtext.minhash.minhash_ngrams(
self.plc_column,
width,
seed,
a.plc_column,
b.plc_column,
)
)
@acquire_spill_lock()
def minhash64_ngrams(
self,
width: int,
seed: int | np.uint64,
a: NumericalColumn,
b: NumericalColumn,
) -> Self:
# Convert int to np.uint64 with validation
if isinstance(seed, int):
if seed < 0 or seed > np.iinfo(np.uint64).max:
raise ValueError(
f"seed must be in range [0, {np.iinfo(np.uint64).max}]"
)
seed = np.uint64(seed)
return type(self).from_pylibcudf(
plc.nvtext.minhash.minhash64_ngrams(
self.plc_column,
width,
seed,
a.plc_column,
b.plc_column,
)
)
| ListColumn |
python | huggingface__transformers | tests/models/videomae/test_modeling_videomae.py | {
"start": 16495,
"end": 19627
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def test_inference_for_video_classification(self):
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
torch_device
)
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 400))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [0.3669, -0.0688, -0.2421],
("cuda", 8): [0.3668, -0.0690, -0.2421],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_for_pretraining(self):
model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video, return_tensors="pt").to(torch_device)
# add boolean mask, indicating which patches to mask
local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
check_torch_load_is_safe()
inputs["bool_masked_pos"] = torch.load(local_path, weights_only=True)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size([1, 1408, 1536])
expected_slice = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
)
self.assertEqual(outputs.logits.shape, expected_shape)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify the loss (`config.norm_pix_loss` = `True`)
expected_loss = torch.tensor([0.5142], device=torch_device)
torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
# verify the loss (`config.norm_pix_loss` = `False`)
model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
torch_device
)
with torch.no_grad():
outputs = model(**inputs)
expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
| VideoMAEModelIntegrationTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor33.py | {
"start": 614,
"end": 666
} | class ____[T: TD1]:
y: list[DC1[T]]
@dataclass
| DC2 |
python | doocs__leetcode | solution/3500-3599/3591.Check if Any Element Has Prime Frequency/Solution.py | {
"start": 0,
"end": 314
} | class ____:
def checkPrimeFrequency(self, nums: List[int]) -> bool:
def is_prime(x: int) -> bool:
if x < 2:
return False
return all(x % i for i in range(2, int(sqrt(x)) + 1))
cnt = Counter(nums)
return any(is_prime(x) for x in cnt.values())
| Solution |
python | getsentry__sentry | tests/sentry/web/frontend/test_organization_auth_settings.py | {
"start": 31840,
"end": 31981
} | class ____(GenericSAML2Provider):
name = "saml2_generic_dummy"
key = "saml2_generic_dummy"
@control_silo_test
| DummyGenericSAML2Provider |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 4098,
"end": 4226
} | class ____:
def m[S](self: S) -> S:
x = cast("list[tuple[S, S]]", self)
return x
| StringizedReferencesCanBeFixed |
python | huggingface__transformers | src/transformers/models/mvp/configuration_mvp.py | {
"start": 804,
"end": 7743
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate a MVP model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50267):
Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MvpModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by diving by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
use_prompt (`bool`, *optional*, defaults to `False`):
Whether or not to use prompt.
prompt_length (`int`, *optional*, defaults to 100):
The length of prompt.
prompt_mid_dim (`int`, *optional*, defaults to 800):
Dimensionality of the "intermediate" layer in prompt.
Example:
```python
>>> from transformers import MvpConfig, MvpModel
>>> # Initializing a MVP RUCAIBox/mvp style configuration
>>> configuration = MvpConfig()
>>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration
>>> model = MvpModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mvp"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50267,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
use_cache=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
is_encoder_decoder=True,
decoder_start_token_id=2,
use_prompt=False,
prompt_length=100,
prompt_mid_dim=800,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.use_prompt = use_prompt
self.prompt_length = prompt_length
self.prompt_mid_dim = prompt_mid_dim
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["MvpConfig"]
| MvpConfig |
python | walkccc__LeetCode | solutions/2450. Number of Distinct Binary Strings After Applying Operations/2450.py | {
"start": 0,
"end": 282
} | class ____:
def countDistinctStrings(self, s: str, k: int) -> int:
# Since the content of `s` doesn't matter, for each i in [0, n - k], we can
# flip s[i..i + k] or don't flip it. Therefore, there's 2^(n - k + 1) ways.
return pow(2, len(s) - k + 1, 1_000_000_007)
| Solution |
python | ray-project__ray | doc/source/ray-core/doc_code/cgraph_troubleshooting.py | {
"start": 108,
"end": 565
} | class ____:
def get_arr(self, _):
numpy_arr = np.ones((5, 1024))
return numpy_arr
actor = NumPyActor.remote()
with InputNode() as inp:
dag = actor.get_arr.bind(inp)
cgraph = dag.experimental_compile()
for _ in range(5):
ref = cgraph.execute(0)
result = ray.get(ref)
# Adding this explicit del would fix any issues
# del result
# __numpy_troubleshooting_end__
# __teardown_troubleshooting_start__
@ray.remote
| NumPyActor |
python | viewflow__viewflow | viewflow/workflow/managers.py | {
"start": 4070,
"end": 9876
} | class ____(QuerySet):
"""Base manager for the Task."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._iterable_class = TaskIterable
def coerce_for(self, flow_classes):
"""Return subclass instances of the Task."""
self._coerced = True
flow_classes = list(flow_classes)
related = filter(
None,
map(
lambda flow_class: _get_related_path(flow_class.task_class, self.model),
flow_classes,
),
)
return self.filter(process__flow_class__in=flow_classes).select_related(
"process", *related
)
def user_queue(self, user, flow_class=None):
"""List of tasks of the flow_class permitted for user."""
queryset = self.filter(flow_task_type="HUMAN")
if flow_class is not None:
queryset = queryset.filter(process__flow_class=flow_class)
if not user.is_superuser:
has_permission = Q(owner_permission__isnull=True) | Q(owner=user)
if "guardian" in settings.INSTALLED_APPS:
from guardian.models import UserObjectPermission, GroupObjectPermission
queryset = queryset.annotate(
owner_permission_obj_check=Concat(
F("owner_permission"),
Value("_"),
F("owner_permission_obj_pk"),
output_field=models.CharField(),
)
)
all_user_perms = user.get_all_permissions()
if hasattr(user, "_viewflow_per_object_perm_cache"):
per_object_perms = user._viewflow_per_object_perm_cache
else:
per_object_perms = {
"{}.{}_{}".format(
userperm.content_type.app_label,
userperm.permission.codename,
userperm.object_pk,
)
for userperm in UserObjectPermission.objects.filter(user=user)
}
per_object_perms = per_object_perms | {
"{}.{}_{}".format(
groupperm.content_type.app_label,
groupperm.permission.codename,
groupperm.object_pk,
)
for groupperm in GroupObjectPermission.objects.filter(
group__in=user.groups.all()
)
}
user._viewflow_per_object_perm_cache = per_object_perms
has_permission = (
has_permission
| Q(owner_permission__in=all_user_perms)
| Q(owner_permission_obj_check__in=per_object_perms)
)
else:
has_permission = (
Q(owner_permission__in=user.get_all_permissions()) | has_permission
)
queryset = queryset.filter(has_permission)
return queryset
def user_archive(self, user, flow_class=None):
"""List of tasks of the flow_class completed by the user."""
queryset = self.filter(flow_task_type="HUMAN")
if flow_class is not None:
queryset = queryset.filter(process__flow_class=flow_class)
return queryset.filter(owner=user, finished__isnull=False)
def filter_available(self, flow_classes, user):
"""List of tasks available to view for the user."""
return self.model.objects.coerce_for(_available_flows(flow_classes, user))
def inbox(self, flow_classes, user):
"""List of tasks assigned to the user."""
return self.filter_available(flow_classes, user).filter(
owner=user, status=STATUS.ASSIGNED
)
def queue(self, flow_classes, user):
"""List of tasks permitted to assign for the user."""
return (
self.filter_available(flow_classes, user)
.user_queue(user)
.filter(status=STATUS.NEW)
)
def archive(self, flow_classes, user):
"""List of tasks finished by the user."""
return self.filter_available(flow_classes, user).filter(
owner=user, finished__isnull=False
)
def next_user_task(self, process, user):
"""
Lookup for the next task for a user execution.
Prefer assigned tasks first, if not, return first task from user queue
"""
# task inside a same process
task = get_next_process_task(self, process, user)
# task inside subprocess
if task is None:
subprocess_task = self.filter(process__parent_task__process=process).first()
if subprocess_task:
task = get_next_process_task(self, subprocess_task.process, user)
# task inside parent process
if task is None and process.parent_task_id:
task = get_next_process_task(self, process.parent_task.process, user)
# task inside other subprocesses of parent task
if task is None and process.parent_task_id:
processes = process.__class__._default_manager.filter(
parent_task__process=process.parent_task.process
).exclude(pk=process.pk)
for sub_process in processes:
task = get_next_process_task(self, sub_process, user)
if task:
break
return task
def _chain(self, **kwargs):
chained = super()._chain(**kwargs)
if hasattr(self, "_coerced"):
chained._coerced = self._coerced
return chained
| TaskQuerySet |
python | PyCQA__pylint | tests/functional/t/too/too_many_ancestors.py | {
"start": 539,
"end": 877
} | class ____(MutableSequence):
"""Minimal MutableSequence."""
def __getitem__(self, key):
return key
def __setitem__(self, key, value):
_ = key, value
def __delitem__(self, key):
_ = key
def insert(self, index, value):
_ = index, value
def __len__(self):
return 1
| ItemSequence |
python | mlflow__mlflow | mlflow/entities/experiment_tag.py | {
"start": 133,
"end": 887
} | class ____(_MlflowObject):
"""Tag object associated with an experiment."""
def __init__(self, key, value):
self._key = key
self._value = value
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
@property
def key(self):
"""String name of the tag."""
return self._key
@property
def value(self):
"""String value of the tag."""
return self._value
def to_proto(self):
param = ProtoExperimentTag()
param.key = self.key
param.value = self.value
return param
@classmethod
def from_proto(cls, proto):
return cls(proto.key, proto.value)
| ExperimentTag |
python | huggingface__transformers | src/transformers/models/internvl/processing_internvl.py | {
"start": 1009,
"end": 1365
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding_side": "left",
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"crop_to_patches": True,
},
"videos_kwargs": {
"return_tensors": "pt",
},
}
| InternVLProcessorKwargs |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 5214,
"end": 5274
} | class ____(RollingReduction):
how = "median"
| RollingMedian |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 1528,
"end": 4171
} | class ____:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2, np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis, :])**2, axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2, np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(
np.sum(np.sum((self.data-x[np.newaxis, :])**2, axis=1) < d**2+eps),
m,
)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd, ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2, np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, f"near_d={near_d:g} should be less than {d:g}")
assert_equal(np.sum(self.distance(self.data, x, 2) < d**2+eps), hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd, ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d, self.distance(x, self.data[near_i], 1))
assert_(near_d < d+eps, f"near_d={near_d:g} should be less than {d:g}")
assert_equal(np.sum(self.distance(self.data, x, 1) < d+eps), hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd, ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d, self.distance(x, self.data[near_i], np.inf))
assert_(near_d < d+eps, f"near_d={near_d:g} should be less than {d:g}")
assert_equal(np.sum(self.distance(self.data, x, np.inf) < d+eps), hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
@KDTreeTest
| ConsistencyTests |
python | pandas-dev__pandas | pandas/tests/series/methods/test_cov_corr.py | {
"start": 159,
"end": 1589
} | class ____:
def test_cov(self, datetime_series):
# full overlap
tm.assert_almost_equal(
datetime_series.cov(datetime_series), datetime_series.std() ** 2
)
# partial overlap
tm.assert_almost_equal(
datetime_series[:15].cov(datetime_series[5:]),
datetime_series[5:15].std() ** 2,
)
# No overlap
assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.cov(cp))
# min_periods
assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.cov(ts2, min_periods=12))
@pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_cov_ddof(self, test_ddof, dtype):
# GH#34611
np_array1 = np.random.default_rng(2).random(10)
np_array2 = np.random.default_rng(2).random(10)
s1 = Series(np_array1, dtype=dtype)
s2 = Series(np_array2, dtype=dtype)
result = s1.cov(s2, ddof=test_ddof)
expected = np.cov(np_array1, np_array2, ddof=test_ddof)[0][1]
assert math.isclose(expected, result)
| TestSeriesCov |
python | ionelmc__pytest-benchmark | tests/test_sample.py | {
"start": 732,
"end": 992
} | class ____:
def __init__(self, factory):
self.factory = factory
self.object = empty
def __str__(self, func=str):
if self.object is empty:
self.object = self.factory()
return func(self.object)
| LocalsSimpleProxy |
python | readthedocs__readthedocs.org | readthedocs/search/api/v2/serializers.py | {
"start": 737,
"end": 1187
} | class ____(serializers.Serializer):
name = serializers.SerializerMethodField()
slug = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
def get_name(self, obj):
return list(getattr(obj, "name", []))
def get_slug(self, obj):
return list(getattr(obj, "slug", []))
def get_description(self, obj):
return list(getattr(obj, "description", []))
| ProjectHighlightSerializer |
python | celery__celery | t/unit/app/test_control.py | {
"start": 879,
"end": 1439
} | class ____:
def test_flatten_reply(self):
reply = [
{'foo@example.com': {'hello': 10}},
{'foo@example.com': {'hello': 20}},
{'bar@example.com': {'hello': 30}}
]
with pytest.warns(DuplicateNodenameWarning) as w:
nodes = control.flatten_reply(reply)
assert 'Received multiple replies from node name: {}.'.format(
next(iter(reply[0]))) in str(w[0].message.args[0])
assert 'foo@example.com' in nodes
assert 'bar@example.com' in nodes
| test_flatten_reply |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_resource_slice.py | {
"start": 383,
"end": 6872
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1ResourceSliceSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ResourceSlice - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1beta1ResourceSlice. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1ResourceSlice. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1ResourceSlice.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1ResourceSlice. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1ResourceSlice. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1ResourceSlice. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1ResourceSlice.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1ResourceSlice. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1ResourceSlice. # noqa: E501
:return: The metadata of this V1beta1ResourceSlice. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1ResourceSlice.
:param metadata: The metadata of this V1beta1ResourceSlice. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta1ResourceSlice. # noqa: E501
:return: The spec of this V1beta1ResourceSlice. # noqa: E501
:rtype: V1beta1ResourceSliceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta1ResourceSlice.
:param spec: The spec of this V1beta1ResourceSlice. # noqa: E501
:type: V1beta1ResourceSliceSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ResourceSlice):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ResourceSlice):
return True
return self.to_dict() != other.to_dict()
| V1beta1ResourceSlice |
python | pennersr__django-allauth | allauth/usersessions/admin.py | {
"start": 117,
"end": 267
} | class ____(admin.ModelAdmin):
raw_id_fields = ("user",)
list_display = ("user", "created_at", "last_seen_at", "ip", "user_agent")
| UserSessionAdmin |
python | astropy__astropy | astropy/modeling/_fitting_parallel.py | {
"start": 3536,
"end": 10654
} | class ____(dict):
def __getattr__(self, attr):
return self[attr]
def _fit_models_to_chunk(
data,
*arrays,
block_info=None,
model=None,
fitter=None,
world=None,
diagnostics=None,
diagnostics_path=None,
diagnostics_callable=None,
iterating_shape=None,
fitter_kwargs=None,
iterating_axes=None,
fitting_axes=None,
weights_specified=None,
fit_info=None,
):
"""
Function that gets passed to map_blocks and will fit models to a specific
chunk of the data.
"""
if fitter_kwargs is None:
fitter_kwargs = {}
# Start off by re-ordering axes so that iterating axes come first followed
# by fitting axes
original_axes = tuple(idx for idx in (iterating_axes + fitting_axes))
new_axes = tuple(range(data.ndim))
data = np.moveaxis(data, original_axes, new_axes)
arrays = [np.moveaxis(array, original_axes, new_axes) for array in arrays]
if weights_specified:
weights = arrays[0]
arrays = arrays[1:]
else:
weights = None
# World coordinates can be specified either as Nd world arrays (in which
# case the world kwarg is set to `None`), or passed in via the world kwarg
# (if the world coordinates are given as 1D arrays)
if world is None:
parameters = arrays[: -model.n_inputs]
world_arrays = arrays[-model.n_inputs :]
else:
parameters = arrays
# Make the parameters into an Nd array, as this is what we will return. We
# then modify this array in-place in the rest of the function.
parameters = np.array(parameters)
# In some cases, dask calls this function with empty arrays, so we can
# take a short-cut here.
if data.ndim == 0 or data.size == 0 or block_info is None or block_info == []:
return parameters
# Because of the way map_blocks works, we need to have all arrays passed
# to map_blocks have the same shape, even though for the parameters this
# means there are extra unneeded dimensions. We slice these out here.
index = tuple([slice(None)] * (1 + len(iterating_axes)) + [0] * len(fitting_axes))
parameters = parameters[index]
# Transform array to object array and add one more index along the first
# dimension so that we can store the fit_info
if fit_info:
parameters = parameters.astype(object)
parameters = np.pad(parameters, [(0, 1)] + [(0, 0)] * (parameters.ndim - 1))
# The world argument is used to pass through 1D arrays of world coordinates
# (otherwise world_arrays is used) so if the model has more than one
# dimension we need to make these arrays N-dimensional.
if world is not None:
if model.n_inputs > 1:
world_values = np.meshgrid(*world, indexing="ij")
else:
world_values = world
iterating_shape_chunk = data.shape[: len(iterating_axes)]
model_i = model.copy()
for index in np.ndindex(iterating_shape_chunk):
# If all data values are NaN, just set parameters to NaN and move on
if np.all(np.isnan(data[index])):
for ipar in range(len(model.param_names)):
parameters[(ipar,) + index] = np.nan
continue
# Inject parameters into model
model_i._reset_parameters(
**{
name: parameters[(ipar,) + index]
for ipar, name in enumerate(model.param_names)
},
)
output = diagnostics == "all"
error = ""
all_warnings = []
if world is None:
world_values = tuple([w[index] for w in world_arrays])
if weights is None:
weights_kwargs = {}
else:
weights_kwargs = dict(weights=weights[index])
# Do the actual fitting - note that we can use inplace=True here to
# speed things up by avoiding an unnecessary copy, since we don't need
# to retain the original parameter values.
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
model_fit = fitter(
model_i,
*world_values,
data[index],
inplace=True,
**weights_kwargs,
**fitter_kwargs,
)
all_warnings.extend(w)
except Exception as exc:
model_fit = None
if diagnostics is not None and diagnostics.startswith("error"):
output = True
error = traceback.format_exc()
for ipar in range(len(model_i.param_names)):
parameters[(ipar,) + index] = np.nan
parameters[(-1,) + index] = None
else:
# Put fitted parameters back into parameters arrays. These arrays are
# created in-memory by dask and are local to this process so should be
# safe to modify in-place
for ipar, name in enumerate(model_fit.param_names):
parameters[(ipar,) + index] = getattr(model_fit, name).value
if fit_info is True:
parameters[(-1,) + index] = fitter.fit_info
elif fit_info:
fit_info_dict = {}
for key in fit_info:
if hasattr(fitter.fit_info, key):
fit_info_dict[key] = getattr(fitter.fit_info, key)
else:
raise AttributeError(
f"fit_info on fitter has no attribute '{key}'"
)
parameters[(-1,) + index] = FitInfoSubset(fit_info_dict)
if diagnostics == "error+warn" and len(all_warnings) > 0:
output = True
if output:
# Construct a folder name based on the iterating index. Currently i
# i a 1-d index but we need to re-convert it back to an N-dimensional
# index.
index_abs = np.array(index) + np.array(
[block_info[0]["array-location"][idx][0] for idx in iterating_axes]
)
maxlen = ceil(log10(max(iterating_shape)))
fmt = "{0:0" + str(maxlen) + "d}"
index_folder = Path(diagnostics_path).joinpath(
"_".join(fmt.format(idx) for idx in index_abs)
)
index_folder.mkdir(parents=True, exist_ok=True)
# Output error, if any
if error:
index_folder.joinpath("error.log").write_text(error)
if all_warnings:
index_folder.joinpath("warn.log").write_text(
"".join(f"{warning}\n" for warning in all_warnings)
)
if diagnostics_callable is not None:
diagnostics_callable(
index_folder,
world_values,
data[index],
None if weights is None else weights[index],
model_fit,
fitter_kwargs,
)
return parameters
| FitInfoSubset |
python | Netflix__metaflow | metaflow/plugins/argo/argo_client.py | {
"start": 373,
"end": 459
} | class ____(MetaflowException):
headline = "Operation not permitted"
| ArgoNotPermitted |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/action_model.py | {
"start": 514,
"end": 934
} | class ____(NamedTuple):
"""
A NamedTuple with fields corresponding the the DistInstance objects
output by continuous and discrete distributions, respectively. Discrete distributions
output a list of DistInstance objects whereas continuous distributions output a single
DistInstance object.
"""
continuous: Optional[DistInstance]
discrete: Optional[List[DiscreteDistInstance]]
| DistInstances |
python | sphinx-doc__sphinx | sphinx/ext/napoleon/docstring.py | {
"start": 39447,
"end": 50739
} | class ____(GoogleDocstring):
"""Convert NumPy style docstrings to reStructuredText.
Parameters
----------
docstring : :obj:`str` or :obj:`list` of :obj:`str`
The docstring to parse, given either as a string or split into
individual lines.
config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
The configuration settings to use. If not given, defaults to the
config object on `app`; or if `app` is not given defaults to the
a new :class:`sphinx.ext.napoleon.Config` object.
Other Parameters
----------------
app : :class:`sphinx.application.Sphinx`, optional
Application object representing the Sphinx process.
what : :obj:`str`, optional
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : :obj:`str`, optional
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : :class:`sphinx.ext.autodoc.Options`, optional
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and no_index that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinx.ext.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Parameters
... ----------
... arg1 : int
... Description of `arg1`
... arg2 : str
... Description of `arg2`
... Returns
... -------
... str
... Description of return value.
... '''
>>> print(NumpyDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
Methods
-------
__str__()
Return the parsed docstring in reStructuredText format.
Returns
-------
str
UTF-8 encoded version of the docstring.
__unicode__()
Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
lines()
Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
def __init__(
self,
docstring: str | list[str],
config: SphinxConfig | None = None,
app: Sphinx | None = None,
what: str = '',
name: str = '',
obj: Any = None,
options: Any = None,
) -> None:
self._directive_sections = ['.. index::']
super().__init__(docstring, config, app, what, name, obj, options)
def _escape_args_and_kwargs(self, name: str) -> str:
func = super()._escape_args_and_kwargs
if ', ' in name:
return ', '.join(map(func, name.split(', ')))
else:
return func(name)
def _consume_field(
self, parse_type: bool = True, prefer_type: bool = False
) -> tuple[str, str, list[str]]:
line = self._lines.next()
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
else:
_name, _type = line, ''
_name, _type = _name.strip(), _type.strip()
_name = self._escape_args_and_kwargs(_name)
if parse_type and not _type:
_type = self._lookup_annotation(_name)
if prefer_type and not _type:
_type, _name = _name, _type
if self._config.napoleon_preprocess_types:
_type = _convert_type_spec(
_type,
translations=self._config.napoleon_type_aliases or {},
debug_location=self._get_location(),
)
indent = self._get_indent(line) + 1
_desc = self._dedent(self._consume_indented_block(indent))
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc
def _consume_returns_section(
self, preprocess_types: bool = False
) -> list[tuple[str, str, list[str]]]:
return self._consume_fields(prefer_type=True)
def _consume_section_header(self) -> str:
section = self._lines.next()
if not _directive_regex.match(section):
# Consume the header underline
self._lines.next()
return section
def _is_section_break(self) -> bool:
line1, line2 = self._lines.get(0), self._lines.get(1)
return (
not self._lines
or self._is_section_header()
or (not line1 and not line2)
or (
self._is_in_section
and line1
and not self._is_indented(line1, self._section_indent)
)
)
def _is_section_header(self) -> bool:
section, underline = self._lines.get(0), self._lines.get(1)
section = section.lower()
if section in self._sections and isinstance(underline, str):
return bool(_numpy_section_regex.match(underline))
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
def _parse_see_also_section(self, section: str) -> list[str]:
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
except ValueError:
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content: list[str]) -> list[str]:
"""See Also
--------
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
Licence
-------
Derived from the NumpyDoc implementation of ``_parse_see_also``,
which was under the following licence:
Copyright (C) 2008 Stefan van der Walt <stefan@mentat.za.net>,
Pauli Virtanen <pav@iki.fi>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
items: list[tuple[str, list[str], str | None]] = []
def parse_item_name(text: str) -> tuple[str, str | None]:
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
msg = f'{text} is not a item name'
raise ValueError(msg)
def push_item(name: str | None, rest: list[str]) -> None:
if not name:
return
name, role = parse_item_name(name)
items.append((name, rest.copy(), role))
rest.clear()
def translate(
func: str,
description: list[str],
role: str | None,
) -> tuple[str, list[str], str | None]:
translations = self._config.napoleon_type_aliases
if role is not None or not translations:
return func, description, role
translated = translations.get(func, func)
match = self._name_rgx.match(translated)
if not match:
return translated, description, role
groups = match.groupdict()
role = groups['role']
new_func = groups['name'] or groups['name2']
return new_func, description, role
current_func = None
rest: list[str] = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end() :].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[: m.end()], line[m.end() :]
rest = [line.partition(':')[-1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
if not items:
return []
# apply type aliases
items = list(starmap(translate, items))
lines: list[str] = []
last_had_desc = True
for name, desc, role in items:
if role:
link = f':{role}:`{name}`'
else:
link = f':py:obj:`{name}`'
if desc or last_had_desc:
lines += ['']
lines += [link]
else:
lines[-1] += f', {link}'
if desc:
lines += self._indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
lines += ['']
return self._format_admonition('seealso', lines)
| NumpyDocstring |
python | jmcnamara__XlsxWriter | xlsxwriter/test/table/test_table09.py | {
"start": 481,
"end": 3181
} | class ____(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table(
"B2:K8",
{
"total_row": 1,
"columns": [
{"total_string": "Total"},
{},
{"total_function": "average"},
{"total_function": "count"},
{"total_function": "count_nums"},
{"total_function": "max"},
{"total_function": "min"},
{"total_function": "sum"},
{"total_function": "stdDev"},
{"total_function": "var"},
],
},
)
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="B2:K8" totalsRowCount="1">
<autoFilter ref="B2:K7"/>
<tableColumns count="10">
<tableColumn id="1" name="Column1" totalsRowLabel="Total"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3" totalsRowFunction="average"/>
<tableColumn id="4" name="Column4" totalsRowFunction="count"/>
<tableColumn id="5" name="Column5" totalsRowFunction="countNums"/>
<tableColumn id="6" name="Column6" totalsRowFunction="max"/>
<tableColumn id="7" name="Column7" totalsRowFunction="min"/>
<tableColumn id="8" name="Column8" totalsRowFunction="sum"/>
<tableColumn id="9" name="Column9" totalsRowFunction="stdDev"/>
<tableColumn id="10" name="Column10" totalsRowFunction="var"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleTable |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/schedules/ticks.py | {
"start": 274,
"end": 476
} | class ____(graphene.ObjectType):
run = graphene.Field("dagster_graphql.schema.pipelines.pipeline.GrapheneRun")
class Meta:
name = "ScheduleTickSuccessData"
| GrapheneScheduleTickSuccessData |
python | doocs__leetcode | solution/3100-3199/3189.Minimum Moves to Get a Peaceful Board/Solution.py | {
"start": 0,
"end": 279
} | class ____:
def minMoves(self, rooks: List[List[int]]) -> int:
rooks.sort()
ans = sum(abs(x - i) for i, (x, _) in enumerate(rooks))
rooks.sort(key=lambda x: x[1])
ans += sum(abs(y - j) for j, (_, y) in enumerate(rooks))
return ans
| Solution |
python | django__django | django/utils/dateformat.py | {
"start": 5935,
"end": 10159
} | class ____(TimeFormat):
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return "%02d" % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"""
Alternative month names as required by some locales. Proprietary
extension.
"""
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self): # NOQA: E743, E741
"'1' if daylight saving time, '0' otherwise."
if self.timezone is None:
return ""
return "1" if self.timezone.dst(self.data) else "0"
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self): # NOQA: E743, E741
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return "%02d" % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar().year
def r(self):
"RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
value = self.data
if not isinstance(value, datetime):
# Assume midnight in default timezone if datetime.date provided.
default_timezone = get_default_timezone()
value = datetime.combine(value, time.min).replace(tzinfo=default_timezone)
elif is_naive(value):
value = make_aware(value, timezone=self.timezone)
return format_datetime_rfc5322(value)
def S(self):
"""
English ordinal suffix for the day of the month, 2 characters; i.e.
'st', 'nd', 'rd' or 'th'.
"""
if self.data.day in (11, 12, 13): # Special case
return "th"
last = self.data.day % 10
if last == 1:
return "st"
if last == 2:
return "nd"
if last == 3:
return "rd"
return "th"
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
value = self.data
if not isinstance(value, datetime):
value = datetime.combine(value, time.min)
return int(value.timestamp())
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
return self.data.isocalendar().week
def y(self):
"""Year, 2 digits with leading zeros; e.g. '99'."""
return "%02d" % (self.data.year % 100)
def Y(self):
"""Year, 4 digits with leading zeros; e.g. '1999'."""
return "%04d" % self.data.year
def z(self):
"""Day of the year, i.e. 1 to 366."""
return self.data.timetuple().tm_yday
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
| DateFormat |
python | kamyu104__LeetCode-Solutions | Python/gray-code.py | {
"start": 31,
"end": 481
} | class ____(object):
def grayCode(self, n):
"""
:type n: int
:rtype: List[int]
"""
result = [0]
for i in xrange(n):
for n in reversed(result):
result.append(1 << i | n)
return result
# Proof of closed form formula could be found here:
# http://math.stackexchange.com/questions/425894/proof-of-closed-form-formula-to-convert-a-binary-number-to-its-gray-code
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/comprehension8.py | {
"start": 147,
"end": 333
} | class ____:
input: str
output: str
def func1(a: ClassA, x: str):
a.output = x.join(
stripped for line in a.input.splitlines() if (stripped := line.strip())
)
| ClassA |
python | django-haystack__django-haystack | test_haystack/mocks.py | {
"start": 5266,
"end": 5356
} | class ____(BaseEngine):
backend = MockSearchBackend
query = MockSearchQuery
| MockEngine |
python | getsentry__sentry | src/sentry/utils/lazy_service_wrapper.py | {
"start": 882,
"end": 3848
} | class ____(Generic[T]):
"""
Lazyily instantiates a standard Sentry service class.
>>> LazyServiceWrapper(BaseClass, 'path.to.import.Backend', {})
Provides an ``expose`` method for dumping public APIs to a context, such as
module locals:
>>> service = LazyServiceWrapper(...)
>>> service.expose(locals())
"""
def __init__(
self,
backend_base: type[T],
backend_path: str,
options: Mapping[str, Any],
dangerous: Sequence[type[Service]] = (),
metrics_path: str | None = None,
) -> None:
self._backend = backend_path
self._options = options
self._base = backend_base
self._dangerous = dangerous
self._metrics_path = metrics_path
self._wrapped: _EmptyType | T = empty
def _setup(self) -> None:
if self._wrapped is not empty:
return
backend = import_string(self._backend)
assert issubclass(backend, Service)
if backend in self._dangerous:
warnings.warn(
warnings.UnsupportedBackend(
"The {!r} backend for {} is not recommended "
"for production use.".format(self._backend, self._base)
)
)
instance = backend(**self._options)
self._wrapped = instance
# -> Any is used as a sentinel here.
# tools.mypy_helpers.plugin fills in the actual type here
# conveniently, nothing else on this class is `Any`
def __getattr__(self, name: str) -> Any:
self._setup()
attr = getattr(self._wrapped, name)
# If we want to wrap in metrics, we need to make sure it's some callable,
# and within our list of exposed attributes. Then we can safely wrap
# in our metrics decorator.
if self._metrics_path and callable(attr) and name in self._base.__all__:
return metrics.wraps(
self._metrics_path, instance=name, tags={"backend": self._backend}
)(attr)
return attr
def test_only__downcast_to(self, t: type[U]) -> U:
"""test-only method to allow typesafe calling on specific subclasses"""
from sentry.utils.env import in_test_environment
assert in_test_environment(), "this method is not to be called outside of test"
self._setup()
if not isinstance(self._wrapped, t):
raise AssertionError(f"wrapped instance {self._wrapped!r} is not of type {t!r}!")
return self._wrapped
def expose(self, context: MutableMapping[str, Any]) -> None:
base = self._base
base_instance = base()
for key in itertools.chain(base.__all__, ("validate", "setup")):
if inspect.isroutine(getattr(base_instance, key)):
context[key] = (lambda f: lambda *a, **k: getattr(self, f)(*a, **k))(key)
else:
context[key] = getattr(base_instance, key)
| LazyServiceWrapper |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-box/llama_index/readers/box/BoxReader/base.py | {
"start": 949,
"end": 9655
} | class ____(BaseReader, ResourcesReaderMixin, FileSystemReaderMixin):
_box_client: BoxClient
@classmethod
def class_name(cls) -> str:
return "BoxReader"
def __init__(
self,
box_client: BoxClient,
):
self._box_client = add_extra_header_to_box_client(box_client)
@abstractmethod
def load_data(
self,
*args,
**kwargs,
) -> List[Document]:
pass
def load_resource(self, box_file_id: str) -> List[Document]:
"""
Load data from a specific resource.
Args:
resource (str): The resource identifier.
Returns:
List[Document]: A list of documents loaded from the resource.
"""
return self.load_data(file_ids=[box_file_id])
def get_resource_info(self, box_file_id: str) -> Dict:
"""
Get information about a specific resource.
Args:
resource_id (str): The resource identifier.
Returns:
Dict: A dictionary of information about the resource.
"""
# Connect to Box
box_check_connection(self._box_client)
resource = get_box_files_details(
box_client=self._box_client, file_ids=[box_file_id]
)
return resource[0].to_dict()
def list_resources(
self,
folder_id: Optional[str] = None,
file_ids: Optional[List[str]] = None,
is_recursive: bool = False,
) -> List[str]:
"""
Lists the IDs of Box files based on the specified folder or file IDs.
This method retrieves a list of Box file identifiers based on the provided
parameters. You can either specify a list of file IDs or a folder ID with an
optional `is_recursive` flag to include files from sub-folders as well.
Args:
folder_id (Optional[str], optional): The ID of the Box folder to list files
from. If provided, along with `is_recursive` set to True, retrieves data
from sub-folders as well. Defaults to None.
file_ids (Optional[List[str]], optional): A list of Box file IDs to retrieve.
If provided, this takes precedence over `folder_id`. Defaults to None.
is_recursive (bool, optional): If True and `folder_id` is provided, retrieves
resource IDs from sub-folders within the specified folder. Defaults to False.
Returns:
List[str]: A list containing the IDs of the retrieved Box files.
"""
# Connect to Box
box_check_connection(self._box_client)
# Get the file resources
box_files: List[File] = []
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
return [file.id for file in box_files]
def read_file_content(self, input_file: Path, **kwargs) -> bytes:
file_id = input_file.name
return get_file_content_by_id(box_client=self._box_client, box_file_id=file_id)
def search_resources(
self,
query: Optional[str] = None,
scope: Optional[SearchForContentScope] = None,
file_extensions: Optional[List[str]] = None,
created_at_range: Optional[List[str]] = None,
updated_at_range: Optional[List[str]] = None,
size_range: Optional[List[int]] = None,
owner_user_ids: Optional[List[str]] = None,
recent_updater_user_ids: Optional[List[str]] = None,
ancestor_folder_ids: Optional[List[str]] = None,
content_types: Optional[List[SearchForContentContentTypes]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> List[str]:
"""
Searches for Box resources based on specified criteria and returns a list of their IDs.
This method utilizes the Box API search functionality to find resources
matching the provided parameters. It then returns a list containing the IDs
of the found resources.
Args:
query (Optional[str], optional): A search query string. Defaults to None.
scope (Optional[SearchForContentScope], optional): The scope of the search.
Defaults to None.
file_extensions (Optional[List[str]], optional): A list of file extensions
to filter by. Defaults to None.
created_at_range (Optional[List[str]], optional): A list representing a date
range for file creation time. Defaults to None.
updated_at_range (Optional[List[str]], optional): A list representing a date
range for file update time. Defaults to None.
size_range (Optional[List[int]], optional): A list representing a size range
for files. Defaults to None.
owner_user_ids (Optional[List[str]], optional): A list of user IDs to filter
by owner. Defaults to None.
recent_updater_user_ids (Optional[List[str]], optional): A list of user IDs to
filter by recent updater. Defaults to None.
ancestor_folder_ids (Optional[List[str]], optional): A list of folder IDs to
search within. Defaults to None.
content_types (Optional[List[SearchForContentContentTypes]], optional): A list
of content types to filter by. Defaults to None.
limit (Optional[int], optional): The maximum number of results to return.
Defaults to None.
offset (Optional[int], optional): The number of results to skip before
starting to collect. Defaults to None.
Returns:
List[str]: A list of Box resource IDs matching the search criteria.
"""
# Connect to Box
box_check_connection(self._box_client)
box_files = search_files(
box_client=self._box_client,
query=query,
scope=scope,
file_extensions=file_extensions,
created_at_range=created_at_range,
updated_at_range=updated_at_range,
size_range=size_range,
owner_user_ids=owner_user_ids,
recent_updater_user_ids=recent_updater_user_ids,
ancestor_folder_ids=ancestor_folder_ids,
content_types=content_types,
limit=limit,
offset=offset,
)
return [box_file.id for box_file in box_files]
def search_resources_by_metadata(
self,
from_: str,
ancestor_folder_id: str,
query: Optional[str] = None,
query_params: Optional[Dict[str, str]] = None,
limit: Optional[int] = None,
marker: Optional[str] = None,
) -> List[str]:
"""
Searches for Box resources based on metadata and returns a list of their IDs.
This method utilizes the Box API search functionality to find resources
matching the provided metadata query. It then returns a list containing the IDs
of the found resources.
Args:
box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
from_ (str): The metadata template key to search from.
ancestor_folder_id (str): The ID of the Box folder to search within.
query (Optional[str], optional): A search query string. Defaults to None.
query_params (Optional[Dict[str, str]], optional): Additional query parameters
to filter the search results. Defaults to None.
limit (Optional[int], optional): The maximum number of results to return.
Defaults to None.
marker (Optional[str], optional): The marker for the start of the next page of
results. Defaults to None.
Returns:
List[str]: A list of Box resource IDs matching the search criteria.
"""
# Connect to Box
box_check_connection(self._box_client)
box_files = search_files_by_metadata(
box_client=self._box_client,
from_=from_,
ancestor_folder_id=ancestor_folder_id,
query=query,
query_params=query_params,
limit=limit,
marker=marker,
)
return [box_file.id for box_file in box_files]
| BoxReaderBase |
python | astral-sh__uv | scripts/packages/hatchling_dynamic/hatch_build.py | {
"start": 75,
"end": 218
} | class ____(BuildHookInterface):
def initialize(self, version, build_data):
build_data["dependencies"].append("anyio")
| LiteraryBuildHook |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py | {
"start": 845,
"end": 893
} | class ____(object, object):
...
@decorator()
| A |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis37.py | {
"start": 315,
"end": 1455
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis37.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [46032384, 48088960]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"line": {"color": "yellow"}})
chart.set_y_axis({"line": {"color": "red"}})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | bokeh__bokeh | tests/unit/bokeh/application/handlers/test_code.py | {
"start": 1236,
"end": 1764
} | class ____(Model):
foo = Int(2)
child = Nullable(Instance(Model))
curdoc().add_root(AnotherModelInTestScript())
curdoc().add_root(SomeModelInTestScript())
"""
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
| SomeModelInTestScript |
python | networkx__networkx | networkx/exception.py | {
"start": 2560,
"end": 2937
} | class ____(NetworkXException):
"""Raised if more than one valid solution exists for an intermediary step
of an algorithm.
In the face of ambiguity, refuse the temptation to guess.
This may occur, for example, when trying to determine the
bipartite node sets in a disconnected bipartite graph when
computing bipartite matchings.
"""
| AmbiguousSolution |
python | pypa__virtualenv | src/virtualenv/discovery/discover.py | {
"start": 74,
"end": 1173
} | class ____(ABC):
"""Discover and provide the requested Python interpreter."""
@classmethod
def add_parser_arguments(cls, parser):
"""
Add CLI arguments for this discovery mechanisms.
:param parser: the CLI parser
"""
raise NotImplementedError
def __init__(self, options) -> None:
"""
Create a new discovery mechanism.
:param options: the parsed options as defined within :meth:`add_parser_arguments`
"""
self._has_run = False
self._interpreter = None
self._env = options.env
@abstractmethod
def run(self):
"""
Discovers an interpreter.
:return: the interpreter ready to use for virtual environment creation
"""
raise NotImplementedError
@property
def interpreter(self):
""":return: the interpreter as returned by :meth:`run`, cached"""
if self._has_run is False:
self._interpreter = self.run()
self._has_run = True
return self._interpreter
__all__ = [
"Discover",
]
| Discover |
python | walkccc__LeetCode | solutions/124. Binary Tree Maximum Path Sum/124.py | {
"start": 0,
"end": 548
} | class ____:
def maxPathSum(self, root: TreeNode | None) -> int:
ans = -math.inf
def maxPathSumDownFrom(root: TreeNode | None) -> int:
"""
Returns the maximum path sum starting from the current root, where
root.val is always included.
"""
nonlocal ans
if not root:
return 0
l = max(0, maxPathSumDownFrom(root.left))
r = max(0, maxPathSumDownFrom(root.right))
ans = max(ans, root.val + l + r)
return root.val + max(l, r)
maxPathSumDownFrom(root)
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 934315,
"end": 935124
} | class ____(sgqlc.types.Type):
"""Represents a user who is a collaborator of a repository."""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "permission", "permission_sources")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="node")
permission = sgqlc.types.Field(sgqlc.types.non_null(RepositoryPermission), graphql_name="permission")
"""The permission the user has on the repository."""
permission_sources = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(PermissionSource)), graphql_name="permissionSources")
"""A list of sources for the user's access to the repository."""
| RepositoryCollaboratorEdge |
python | pytorch__pytorch | test/inductor/test_padding.py | {
"start": 2004,
"end": 3206
} | class ____(nn.Module):
"""
It's very common that a transformer model will do a matmul and then
softmax/log_softmax in the end.
Creating this toy model to capture the pattern and make sure we do
proper padding.
"""
def __init__(self, vocab_size=30523, bias=True):
"""
The default vocab size for BertForMaskedLM is 30522.
We run a few test cases with good or bad vocab_size around Bert's
default value.
"""
super().__init__()
self.vocab_size = vocab_size
self.linear = nn.Linear(768, vocab_size, bias=bias)
self.ce = nn.CrossEntropyLoss()
def forward(self, x, label):
x = self.linear(x)
return self.ce(x.view(-1, self.vocab_size), label.view(-1))
def get_example_inputs(self, batch_size=16):
return torch.randn(batch_size, 512, 768), torch.randint(
0, self.vocab_size, (batch_size, 512)
)
def forward_and_backward_pass(m, inputs):
m(*inputs).sum().backward()
@config.patch(
{
"benchmark_kernel": True,
"triton.unique_kernel_names": True,
"triton.cudagraphs": USE_CUDA_GRAPHS,
}
)
@requires_gpu()
| LinearAndSoftmax |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_pattern03.py | {
"start": 315,
"end": 3550
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pattern03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [110902272, 110756608]
data = [
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
worksheet.write_column("E1", data[4])
worksheet.write_column("F1", data[5])
worksheet.write_column("G1", data[6])
worksheet.write_column("H1", data[7])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$3",
"pattern": {
"pattern": "percent_10",
"fg_color": "#C00000",
"bg_color": "#FFFFFF",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$3",
"pattern": {
"pattern": "percent_60",
"fg_color": "#FF0000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$C$1:$C$3",
"pattern": {
"pattern": "light_upward_diagonal",
"fg_color": "#FFC000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$D$1:$D$3",
"pattern": {
"pattern": "light_horizontal",
"fg_color": "#FFFF00",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$E$1:$E$3",
"pattern": {
"pattern": "dashed_upward_diagonal",
"fg_color": "#92D050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$F$1:$F$3",
"pattern": {
"pattern": "wave",
"fg_color": "#00B050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$G$1:$G$3",
"pattern": {
"pattern": "dotted_grid",
"fg_color": "#00B0F0",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$H$1:$H$3",
"pattern": {
"pattern": "large_grid",
"fg_color": "#0070C0",
},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pandas-dev__pandas | asv_bench/benchmarks/attrs_caching.py | {
"start": 441,
"end": 1414
} | class ____:
params = [["numeric", "object", "category", "datetime64", "datetime64tz"]]
param_names = ["dtype"]
def setup(self, dtype):
if dtype == "numeric":
self.series = pd.Series([1, 2, 3])
elif dtype == "object":
self.series = pd.Series(["a", "b", "c"], dtype=object)
elif dtype == "category":
self.series = pd.Series(["a", "b", "c"], dtype="category")
elif dtype == "datetime64":
self.series = pd.Series(pd.date_range("2013", periods=3))
elif dtype == "datetime64tz":
self.series = pd.Series(pd.date_range("2013", periods=3, tz="UTC"))
def time_array(self, dtype):
self.series.array
def time_extract_array(self, dtype):
extract_array(self.series)
def time_extract_array_numpy(self, dtype):
extract_array(self.series, extract_numpy=True)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| SeriesArrayAttribute |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-oracleai/llama_index/readers/oracleai/base.py | {
"start": 5135,
"end": 13133
} | class ____(BaseReader):
"""
Read documents using OracleDocLoader
Args:
conn: Oracle Connection,
params: Loader parameters.
"""
def __init__(self, conn: Connection, params: Dict[str, Any]):
self.conn = conn
self.params = json.loads(json.dumps(params))
def load(self) -> List[Document]:
"""Load data into Document objects..."""
try:
import oracledb
except ImportError as e:
raise ImportError(
"Unable to import oracledb, please install with "
"`pip install -U oracledb`."
) from e
ncols = 0
results = []
metadata = {}
m_params = {"plaintext": "false"}
try:
# extract the parameters
if self.params is not None:
self.file = self.params.get("file")
self.dir = self.params.get("dir")
self.owner = self.params.get("owner")
self.tablename = self.params.get("tablename")
self.colname = self.params.get("colname")
else:
raise Exception("Missing loader parameters")
oracledb.defaults.fetch_lobs = False
if self.file:
doc = OracleDocReader.read_file(self.conn, self.file, m_params)
if doc is None:
return results
results.append(doc)
if self.dir:
skip_count = 0
if not (os.path.exists(self.dir) and os.path.isdir(self.dir)):
raise Exception("Directory does not exist or invalid.")
else:
for file_name in os.listdir(self.dir):
file_path = os.path.join(self.dir, file_name)
if os.path.isfile(file_path):
doc = OracleDocReader.read_file(
self.conn, file_path, m_params
)
if doc is None:
skip_count = skip_count + 1
print(f"Total skipped: {skip_count}\n")
else:
results.append(doc)
if self.tablename:
try:
if self.owner is None or self.colname is None:
raise Exception("Missing owner or column name")
cursor = self.conn.cursor()
self.mdata_cols = self.params.get("mdata_cols")
if self.mdata_cols is not None:
if len(self.mdata_cols) > 3:
raise Exception(
"Exceeds the max number of columns you can request for metadata."
)
# execute a query to get column data types
sql = (
"select column_name, data_type from all_tab_columns where owner = '"
+ self.owner.upper()
+ "' and "
+ "table_name = '"
+ self.tablename.upper()
+ "'"
)
cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
if row[0] in self.mdata_cols:
if row[1] not in [
"NUMBER",
"BINARY_DOUBLE",
"BINARY_FLOAT",
"LONG",
"DATE",
"TIMESTAMP",
"VARCHAR2",
]:
raise Exception(
"The datatype for the column requested for metadata is not supported."
)
self.mdata_cols_sql = ", rowid"
if self.mdata_cols is not None:
for col in self.mdata_cols:
self.mdata_cols_sql = self.mdata_cols_sql + ", " + col
# [TODO] use bind variables
sql = (
"select dbms_vector_chain.utl_to_text(t."
+ self.colname
+ ", json('"
+ json.dumps(m_params)
+ "')) mdata, dbms_vector_chain.utl_to_text(t."
+ self.colname
+ ") text"
+ self.mdata_cols_sql
+ " from "
+ self.owner
+ "."
+ self.tablename
+ " t"
)
cursor.execute(sql)
for row in cursor:
metadata = {}
if row is None:
doc_id = OracleDocReader.generate_object_id(
self.conn.username
+ "$"
+ self.owner
+ "$"
+ self.tablename
+ "$"
+ self.colname
)
metadata["_oid"] = doc_id
results.append(Document(text="", metadata=metadata))
else:
if row[0] is not None:
data = str(row[0])
if data.startswith(("<!DOCTYPE html", "<HTML>")):
p = ParseOracleDocMetadata()
p.feed(data)
metadata = p.get_metadata()
doc_id = OracleDocReader.generate_object_id(
self.conn.username
+ "$"
+ self.owner
+ "$"
+ self.tablename
+ "$"
+ self.colname
+ "$"
+ str(row[2])
)
metadata["_oid"] = doc_id
metadata["_rowid"] = row[2]
# process projected metadata cols
if self.mdata_cols is not None:
ncols = len(self.mdata_cols)
for i in range(ncols):
if i == 0:
metadata["_rowid"] = row[i + 2]
else:
metadata[self.mdata_cols[i]] = row[i + 2]
if row[1] is None:
results.append(Document(text="", metadata=metadata))
else:
results.append(
Document(text=str(row[1]), metadata=metadata)
)
except Exception as ex:
print(f"An exception occurred :: {ex}")
traceback.print_exc()
cursor.close()
raise
return results
except Exception as ex:
print(f"An exception occurred :: {ex}")
traceback.print_exc()
raise
def load_data(self) -> List[Document]:
return self.load()
logger = logging.getLogger(__name__)
| OracleReader |
python | sqlalchemy__sqlalchemy | examples/asyncio/greenlet_orm.py | {
"start": 746,
"end": 2418
} | class ____(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
data = Column(String)
def run_queries(session):
"""A function written in "synchronous" style that will be invoked
within the asyncio event loop.
The session object passed is a traditional orm.Session object with
synchronous interface.
"""
stmt = select(A)
result = session.execute(stmt)
for a1 in result.scalars():
print(a1)
# lazy loads
for b1 in a1.bs:
print(b1)
result = session.execute(select(A).order_by(A.id))
a1 = result.scalars().first()
a1.data = "new data"
async def async_main():
"""Main program function."""
engine = create_async_engine(
"postgresql+asyncpg://scott:tiger@localhost/test",
echo=True,
)
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
await conn.run_sync(Base.metadata.create_all)
async with AsyncSession(engine) as session:
async with session.begin():
session.add_all(
[
A(bs=[B(), B()], data="a1"),
A(bs=[B()], data="a2"),
A(bs=[B(), B()], data="a3"),
]
)
# we have the option to run a function written in sync style
# within the AsyncSession.run_sync() method. The function will
# be passed a synchronous-style Session object and the function
# can use traditional ORM patterns.
await session.run_sync(run_queries)
await session.commit()
asyncio.run(async_main())
| B |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 165568,
"end": 167187
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"widget",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", sa.String(40), nullable=False, unique=True),
)
Table(
"widget_rel",
metadata,
Column("parent_id", Integer, ForeignKey("widget.id")),
Column("child_id", Integer, ForeignKey("widget.id")),
sa.UniqueConstraint("parent_id", "child_id"),
)
def test_basic(self):
widget, widget_rel = self.tables.widget, self.tables.widget_rel
class Widget(ComparableEntity):
pass
self.mapper_registry.map_imperatively(
Widget,
widget,
properties={
"children": relationship(
Widget,
secondary=widget_rel,
primaryjoin=widget_rel.c.parent_id == widget.c.id,
secondaryjoin=widget_rel.c.child_id == widget.c.id,
lazy="joined",
join_depth=1,
)
},
)
sess = fixture_session()
w1 = Widget(name="w1")
w2 = Widget(name="w2")
w1.children.append(w2)
sess.add(w1)
sess.flush()
sess.expunge_all()
eq_(
[Widget(name="w1", children=[Widget(name="w2")])],
sess.query(Widget).filter(Widget.name == "w1").all(),
)
| SelfReferentialM2MEagerTest |
python | pytorch__pytorch | torch/distributed/_composable_state.py | {
"start": 74,
"end": 1459
} | class ____:
pass
_module_state_mapping: weakref.WeakKeyDictionary[
nn.Module, weakref.ReferenceType[_State]
] = weakref.WeakKeyDictionary()
def _insert_module_state(module: nn.Module, state: _State) -> None:
global _module_state_mapping
if module in _module_state_mapping:
raise AssertionError(f"Inserting {module} more than once.")
_module_state_mapping[module] = weakref.ref(state)
def _get_module_state(module: nn.Module) -> Optional[_State]:
"""
Return the ``_State`` in ``model``.
Given a ``module``, this API finds out if the module is also a ``_State``
instance or if the module is managed by a composable API. If the module
is also a ``_State``, ``module`` will be casted to ``_State` and returned.
If it is managed by a composable API, the corresponding ``_State`` will
be returned.
"""
global _module_state_mapping
if isinstance(module, _State):
# pyrefly: ignore [redundant-cast]
return cast(_State, module)
else:
# https://github.com/pytorch/pytorch/issues/107054
if module in _module_state_mapping:
state_ref = _module_state_mapping[module]
state = state_ref()
if state is None:
raise AssertionError("State has already been garbage collected")
return state
else:
return None
| _State |
python | django__django | django/db/models/enums.py | {
"start": 202,
"end": 1700
} | class ____(EnumType):
"""A metaclass for creating a enum choices."""
def __new__(metacls, classname, bases, classdict, **kwds):
labels = []
for key in classdict._member_names:
value = classdict[key]
if (
isinstance(value, (list, tuple))
and len(value) > 1
and isinstance(value[-1], (Promise, str))
):
*value, label = value
value = tuple(value)
else:
label = key.replace("_", " ").title()
labels.append(label)
# Use dict.__setitem__() to suppress defenses against double
# assignment in enum's classdict.
dict.__setitem__(classdict, key, value)
cls = super().__new__(metacls, classname, bases, classdict, **kwds)
for member, label in zip(cls.__members__.values(), labels):
member._label_ = label
return enum.unique(cls)
@property
def names(cls):
empty = ["__empty__"] if hasattr(cls, "__empty__") else []
return empty + [member.name for member in cls]
@property
def choices(cls):
empty = [(None, cls.__empty__)] if hasattr(cls, "__empty__") else []
return empty + [(member.value, member.label) for member in cls]
@property
def labels(cls):
return [label for _, label in cls.choices]
@property
def values(cls):
return [value for value, _ in cls.choices]
| ChoicesType |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/selective_checks.py | {
"start": 4987,
"end": 5133
} | class ____:
pass
ALL_PROVIDERS_SENTINEL = AllProvidersSentinel()
T = TypeVar("T", FileGroupForCi, SelectiveCoreTestType)
| AllProvidersSentinel |
python | numpy__numpy | numpy/polynomial/_polybase.py | {
"start": 443,
"end": 39314
} | class ____(abc.ABC):
"""An abstract base class for immutable series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
methods listed below.
Parameters
----------
coef : array_like
Series coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
``P_i`` is the basis polynomials of degree ``i``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is the derived class domain.
window : (2,) array_like, optional
Window, see domain for its use. The default value is the
derived class window.
symbol : str, optional
Symbol used to represent the independent variable in string
representations of the polynomial expression, e.g. for printing.
The symbol must be a valid Python identifier. Default value is 'x'.
.. versionadded:: 1.24
Attributes
----------
coef : (N,) ndarray
Series coefficients in order of increasing degree.
domain : (2,) ndarray
Domain that is mapped to window.
window : (2,) ndarray
Window that domain is mapped to.
symbol : str
Symbol representing the independent variable.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
window : (2,) ndarray
Default window of the class.
"""
# Not hashable
__hash__ = None
# Opt out of numpy ufuncs and Python ops with ndarray subclasses.
__array_ufunc__ = None
# Limit runaway size. T_n^m has degree n*m
maxpower = 100
# Unicode character mappings for improved __str__
_superscript_mapping = str.maketrans({
"0": "⁰",
"1": "¹",
"2": "²",
"3": "³",
"4": "⁴",
"5": "⁵",
"6": "⁶",
"7": "⁷",
"8": "⁸",
"9": "⁹"
})
_subscript_mapping = str.maketrans({
"0": "₀",
"1": "₁",
"2": "₂",
"3": "₃",
"4": "₄",
"5": "₅",
"6": "₆",
"7": "₇",
"8": "₈",
"9": "₉"
})
# Some fonts don't support full unicode character ranges necessary for
# the full set of superscripts and subscripts, including common/default
# fonts in Windows shells/terminals. Therefore, default to ascii-only
# printing on windows.
_use_unicode = not os.name == 'nt'
@property
def symbol(self):
return self._symbol
@property
@abc.abstractmethod
def domain(self):
pass
@property
@abc.abstractmethod
def window(self):
pass
@property
@abc.abstractmethod
def basis_name(self):
pass
@staticmethod
@abc.abstractmethod
def _add(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _sub(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _mul(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _div(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _pow(c, pow, maxpower=None):
pass
@staticmethod
@abc.abstractmethod
def _val(x, c):
pass
@staticmethod
@abc.abstractmethod
def _int(c, m, k, lbnd, scl):
pass
@staticmethod
@abc.abstractmethod
def _der(c, m, scl):
pass
@staticmethod
@abc.abstractmethod
def _fit(x, y, deg, rcond, full):
pass
@staticmethod
@abc.abstractmethod
def _line(off, scl):
pass
@staticmethod
@abc.abstractmethod
def _roots(c):
pass
@staticmethod
@abc.abstractmethod
def _fromroots(r):
pass
def has_samecoef(self, other):
"""Check if coefficients match.
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
return (
len(self.coef) == len(other.coef)
and np.all(self.coef == other.coef)
)
def has_samedomain(self, other):
"""Check if domains match.
Parameters
----------
other : class instance
The other class must have the ``domain`` attribute.
Returns
-------
bool : boolean
True if the domains are the same, False otherwise.
"""
return np.all(self.domain == other.domain)
def has_samewindow(self, other):
"""Check if windows match.
Parameters
----------
other : class instance
The other class must have the ``window`` attribute.
Returns
-------
bool : boolean
True if the windows are the same, False otherwise.
"""
return np.all(self.window == other.window)
def has_sametype(self, other):
"""Check if types match.
Parameters
----------
other : object
Class instance.
Returns
-------
bool : boolean
True if other is same class as self
"""
return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef
The coefficients of`other` if it is a compatible instance,
of ABCPolyBase, otherwise `other`.
Raises
------
TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
elif self.symbol != other.symbol:
raise ValueError("Polynomial symbols differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None, symbol='x'):
[coef] = pu.as_series([coef], trim=False)
self.coef = coef
if domain is not None:
[domain] = pu.as_series([domain], trim=False)
if len(domain) != 2:
raise ValueError("Domain has wrong number of elements.")
self.domain = domain
if window is not None:
[window] = pu.as_series([window], trim=False)
if len(window) != 2:
raise ValueError("Window has wrong number of elements.")
self.window = window
# Validation for symbol
try:
if not symbol.isidentifier():
raise ValueError(
"Symbol string must be a valid Python identifier"
)
# If a user passes in something other than a string, the above
# results in an AttributeError. Catch this and raise a more
# informative exception
except AttributeError:
raise TypeError("Symbol must be a non-empty string")
self._symbol = symbol
def __repr__(self):
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
return (f"{name}({coef}, domain={domain}, window={window}, "
f"symbol='{self.symbol}')")
def __format__(self, fmt_str):
if fmt_str == '':
return self.__str__()
if fmt_str not in ('ascii', 'unicode'):
raise ValueError(
f"Unsupported format string '{fmt_str}' passed to "
f"{self.__class__}.__format__. Valid options are "
f"'ascii' and 'unicode'"
)
if fmt_str == 'ascii':
return self._generate_string(self._str_term_ascii)
return self._generate_string(self._str_term_unicode)
def __str__(self):
if self._use_unicode:
return self._generate_string(self._str_term_unicode)
return self._generate_string(self._str_term_ascii)
def _generate_string(self, term_method):
"""
Generate the full string representation of the polynomial, using
``term_method`` to generate each polynomial term.
"""
# Get configuration for line breaks
linewidth = np.get_printoptions().get('linewidth', 75)
if linewidth < 1:
linewidth = 1
out = pu.format_float(self.coef[0])
off, scale = self.mapparms()
scaled_symbol, needs_parens = self._format_term(pu.format_float,
off, scale)
if needs_parens:
scaled_symbol = '(' + scaled_symbol + ')'
for i, coef in enumerate(self.coef[1:]):
out += " "
power = str(i + 1)
# Polynomial coefficient
# The coefficient array can be an object array with elements that
# will raise a TypeError with >= 0 (e.g. strings or Python
# complex). In this case, represent the coefficient as-is.
try:
if coef >= 0:
next_term = "+ " + pu.format_float(coef, parens=True)
else:
next_term = "- " + pu.format_float(-coef, parens=True)
except TypeError:
next_term = f"+ {coef}"
# Polynomial term
next_term += term_method(power, scaled_symbol)
# Length of the current line with next term added
line_len = len(out.split('\n')[-1]) + len(next_term)
# If not the last term in the polynomial, it will be two
# characters longer due to the +/- with the next term
if i < len(self.coef[1:]) - 1:
line_len += 2
# Handle linebreaking
if line_len >= linewidth:
next_term = next_term.replace(" ", "\n", 1)
out += next_term
return out
@classmethod
def _str_term_unicode(cls, i, arg_str):
"""
String representation of single polynomial term using unicode
characters for superscripts and subscripts.
"""
if cls.basis_name is None:
raise NotImplementedError(
"Subclasses must define either a basis_name, or override "
"_str_term_unicode(cls, i, arg_str)"
)
return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}"
f"({arg_str})")
@classmethod
def _str_term_ascii(cls, i, arg_str):
"""
String representation of a single polynomial term using ** and _ to
represent superscripts and subscripts, respectively.
"""
if cls.basis_name is None:
raise NotImplementedError(
"Subclasses must define either a basis_name, or override "
"_str_term_ascii(cls, i, arg_str)"
)
return f" {cls.basis_name}_{i}({arg_str})"
@classmethod
def _repr_latex_term(cls, i, arg_str, needs_parens):
if cls.basis_name is None:
raise NotImplementedError(
"Subclasses must define either a basis name, or override "
"_repr_latex_term(i, arg_str, needs_parens)")
# since we always add parens, we don't care if the expression needs them
return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})"
@staticmethod
def _repr_latex_scalar(x, parens=False):
# TODO: we're stuck with disabling math formatting until we handle
# exponents in this function
return fr'\text{{{pu.format_float(x, parens=parens)}}}'
def _format_term(self, scalar_format: Callable, off: float, scale: float):
""" Format a single term in the expansion """
if off == 0 and scale == 1:
term = self.symbol
needs_parens = False
elif scale == 1:
term = f"{scalar_format(off)} + {self.symbol}"
needs_parens = True
elif off == 0:
term = f"{scalar_format(scale)}{self.symbol}"
needs_parens = True
else:
term = (
f"{scalar_format(off)} + "
f"{scalar_format(scale)}{self.symbol}"
)
needs_parens = True
return term, needs_parens
def _repr_latex_(self):
# get the scaled argument string to the basis functions
off, scale = self.mapparms()
term, needs_parens = self._format_term(self._repr_latex_scalar,
off, scale)
mute = r"\color{{LightGray}}{{{}}}".format
parts = []
for i, c in enumerate(self.coef):
# prevent duplication of + and - signs
if i == 0:
coef_str = f"{self._repr_latex_scalar(c)}"
elif not isinstance(c, numbers.Real):
coef_str = f" + ({self._repr_latex_scalar(c)})"
elif c >= 0:
coef_str = f" + {self._repr_latex_scalar(c, parens=True)}"
else:
coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}"
# produce the string for the term
term_str = self._repr_latex_term(i, term, needs_parens)
if term_str == '1':
part = coef_str
else:
part = rf"{coef_str}\,{term_str}"
if c == 0:
part = mute(part)
parts.append(part)
if parts:
body = ''.join(parts)
else:
# in case somehow there are no coefficients at all
body = '0'
return rf"${self.symbol} \mapsto {body}$"
# Pickle and copy
def __getstate__(self):
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
ret['window'] = self.window.copy()
ret['symbol'] = self.symbol
return ret
def __setstate__(self, dict):
self.__dict__ = dict
# Call
def __call__(self, arg):
arg = pu.mapdomain(arg, self.domain, self.window)
return self._val(arg, self.coef)
def __iter__(self):
return iter(self.coef)
def __len__(self):
return len(self.coef)
# Numeric properties.
def __neg__(self):
return self.__class__(
-self.coef, self.domain, self.window, self.symbol
)
def __pos__(self):
return self
def __add__(self, other):
othercoef = self._get_coefficients(other)
try:
coef = self._add(self.coef, othercoef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window, self.symbol)
def __sub__(self, other):
othercoef = self._get_coefficients(other)
try:
coef = self._sub(self.coef, othercoef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window, self.symbol)
def __mul__(self, other):
othercoef = self._get_coefficients(other)
try:
coef = self._mul(self.coef, othercoef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window, self.symbol)
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, numbers.Number) or isinstance(other, bool):
raise TypeError(
f"unsupported types for true division: "
f"'{type(self)}', '{type(other)}'"
)
return self.__floordiv__(other)
def __floordiv__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[0]
def __mod__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[1]
def __divmod__(self, other):
othercoef = self._get_coefficients(other)
try:
quo, rem = self._div(self.coef, othercoef)
except ZeroDivisionError:
raise
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window, self.symbol)
rem = self.__class__(rem, self.domain, self.window, self.symbol)
return quo, rem
def __pow__(self, other):
coef = self._pow(self.coef, other, maxpower=self.maxpower)
res = self.__class__(coef, self.domain, self.window, self.symbol)
return res
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window, self.symbol)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window, self.symbol)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window, self.symbol)
def __rtruediv__(self, other):
# An instance of ABCPolyBase is not considered a
# Number.
return NotImplemented
def __rfloordiv__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[0]
def __rmod__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[1]
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except ZeroDivisionError:
raise
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window, self.symbol)
rem = self.__class__(rem, self.domain, self.window, self.symbol)
return quo, rem
def __eq__(self, other):
res = (isinstance(other, self.__class__) and
np.all(self.domain == other.domain) and
np.all(self.window == other.window) and
(self.coef.shape == other.coef.shape) and
np.all(self.coef == other.coef) and
(self.symbol == other.symbol))
return res
def __ne__(self, other):
return not self.__eq__(other)
#
# Extra methods.
#
def copy(self):
"""Return a copy.
Returns
-------
new_series : series
Copy of self.
"""
return self.__class__(self.coef, self.domain, self.window, self.symbol)
def degree(self):
"""The degree of the series.
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
Examples
--------
Create a polynomial object for ``1 + 7*x + 4*x**2``:
>>> np.polynomial.set_default_printstyle("unicode")
>>> poly = np.polynomial.Polynomial([1, 7, 4])
>>> print(poly)
1.0 + 7.0·x + 4.0·x²
>>> poly.degree()
2
Note that this method does not check for non-zero coefficients.
You must trim the polynomial to remove any trailing zeroes:
>>> poly = np.polynomial.Polynomial([1, 7, 0])
>>> print(poly)
1.0 + 7.0·x + 0.0·x²
>>> poly.degree()
2
>>> poly.trim().degree()
1
"""
return len(self) - 1
def cutdeg(self, deg):
"""Truncate series to the given degree.
Reduce the degree of the series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_series : series
New instance of series with reduced degree.
"""
return self.truncate(deg + 1)
def trim(self, tol=0):
"""Remove trailing coefficients
Remove trailing coefficients until a coefficient is reached whose
absolute value greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set
to ``[0]``. A new series instance is returned with the new
coefficients. The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_series : series
New instance of series with trimmed coefficients.
"""
coef = pu.trimcoef(self.coef, tol)
return self.__class__(coef, self.domain, self.window, self.symbol)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window, self.symbol)
def convert(self, domain=None, kind=None, window=None):
"""Convert series to a different kind and/or domain and/or window.
Parameters
----------
domain : array_like, optional
The domain of the converted series. If the value is None,
the default domain of `kind` is used.
kind : class, optional
The polynomial series type class to which the current instance
should be converted. If kind is None, then the class of the
current instance is used.
window : array_like, optional
The window of the converted series. If the value is None,
the default window of `kind` is used.
Returns
-------
new_series : series
The returned class can be of different type than the current
instance and/or have a different domain and/or different
window.
Notes
-----
Conversion between domains and class types can result in
numerically ill defined series.
"""
if kind is None:
kind = self.__class__
if domain is None:
domain = kind.domain
if window is None:
window = kind.window
return self(kind.identity(domain, window=window, symbol=self.symbol))
def mapparms(self):
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
map depends on the ``domain`` and ``window``; if the current
``domain`` is equal to the ``window`` the resulting map is the
identity. If the coefficients of the series instance are to be
used by themselves outside this class, then the linear function
must be substituted for the ``x`` in the standard representation of
the base polynomials.
Returns
-------
off, scl : float or complex
The mapping function is defined by ``off + scl*x``.
Notes
-----
If the current domain is the interval ``[l1, r1]`` and the window
is ``[l2, r2]``, then the linear mapping function ``L`` is
defined by the equations::
L(l1) = l2
L(r1) = r2
"""
return pu.mapparms(self.domain, self.window)
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
list of values must less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl * lbnd
coef = self._int(self.coef, m, k, lbnd, 1. / scl)
return self.__class__(coef, self.domain, self.window, self.symbol)
def deriv(self, m=1):
"""Differentiate.
Return a series instance of that is the derivative of the current
series.
Parameters
----------
m : non-negative int
Find the derivative of order `m`.
Returns
-------
new_series : series
A new series representing the derivative. The domain is the same
as the domain of the differentiated series.
"""
off, scl = self.mapparms()
coef = self._der(self.coef, m, scl)
return self.__class__(coef, self.domain, self.window, self.symbol)
def roots(self):
"""Return the roots of the series polynomial.
Compute the roots for the series. Note that the accuracy of the
roots decreases the further outside the `domain` they lie.
Returns
-------
roots : ndarray
Array containing the roots of the series.
"""
roots = self._roots(self.coef)
return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
@classmethod
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
window=None, symbol='x'):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,)
y-coordinates of the M sample points ``(x[i], y[i])``.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is ``len(x)*eps``, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None, the weight ``w[i]`` applies to the unsquared
residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
chosen so that the errors of the products ``w[i]*y[i]`` all have
the same variance. When using inverse-variance weighting, use
``w[i] = 1/sigma(y[i])``. The default value is None.
window : {[beg, end]}, optional
Window to use for the returned series. The default
value is the default class domain
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain and window specified in the call. If the
coefficients for the unscaled and unshifted basis polynomials are
of interest, do ``new_series.convert().coef``.
[resid, rank, sv, rcond] : list
These values are only returned if ``full == True``
- resid -- sum of squared residuals of the least squares fit
- rank -- the numerical rank of the scaled Vandermonde matrix
- sv -- singular values of the scaled Vandermonde matrix
- rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
if domain[0] == domain[1]:
domain[0] -= 1
domain[1] += 1
elif isinstance(domain, list) and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return (
cls(coef, domain=domain, window=window, symbol=symbol), status
)
else:
coef = res
return cls(coef, domain=domain, window=window, symbol=symbol)
@classmethod
def fromroots(cls, roots, domain=[], window=None, symbol='x'):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif isinstance(domain, list) and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl * roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window, symbol=symbol)
@classmethod
def identity(cls, domain=None, window=None, symbol='x'):
"""Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
Series of representing the identity.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
off, scl = pu.mapparms(window, domain)
coef = cls._line(off, scl)
return cls(coef, domain, window, symbol)
@classmethod
def basis(cls, deg, domain=None, window=None, symbol='x'):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0] * ideg + [1], domain, window, symbol)
@classmethod
def cast(cls, series, domain=None, window=None):
"""Convert series to series of this class.
The `series` is expected to be an instance of some polynomial
series of one of the types supported by by the numpy.polynomial
module, but could be some other class that supports the convert
method.
Parameters
----------
series : series
The series instance to be converted.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series of the same kind as the calling class and equal to
`series` when evaluated.
See Also
--------
convert : similar instance method
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
return series.convert(domain, cls, window)
| ABCPolyBase |
python | getsentry__sentry | src/sentry/notifications/platform/service.py | {
"start": 1129,
"end": 8843
} | class ____[T: NotificationData]:
def __init__(self, *, data: T):
self.data: Final[T] = data
@staticmethod
def has_access(organization: Organization, source: str) -> bool:
if not features.has("organizations:notification-platform", organization):
return False
option_key = f"notifications.platform-rate.{source}"
try:
options.get(option_key)
except options.UnknownOption:
logger.warning(
"Notification platform key '%s' has not been registered in options/default.py",
option_key,
)
return False
return sample_modulo(option_key, organization.id)
def notify_target(self, *, target: NotificationTarget) -> None:
"""
Send a notification directly to a target synchronously.
NOTE: This method ignores notification settings. When possible, consider using a strategy instead of
using this method directly to prevent unwanted noise associated with your notifications.
NOTE: Use this method when you care about the notification sending result and delivering that back to the user.
Otherwise, we generally reccomend using the async version.
"""
if not self.data:
raise NotificationServiceError(
"Notification service must be initialized with data before sending!"
)
event_lifecycle = NotificationEventLifecycleMetric(
interaction_type=NotificationInteractionType.NOTIFY_TARGET_SYNC,
notification_source=self.data.source,
notification_provider=target.provider_key,
)
with event_lifecycle.capture() as lifecycle:
# Step 1: Get the provider, and validate the target against it
provider = provider_registry.get(target.provider_key)
provider.validate_target(target=target)
# Step 2: Render the template
template_cls = template_registry.get(self.data.source)
template = template_cls()
# Update the lifecycle with the notification category now that we know it
event_lifecycle.notification_category = template.category
renderable = NotificationService.render_template(
data=self.data, template=template, provider=provider
)
# Step 3: Send the notification
try:
provider.send(target=target, renderable=renderable)
except IntegrationConfigurationError as e:
lifecycle.record_halt(halt_reason=e, create_issue=False)
raise
except Exception as e:
lifecycle.record_failure(failure_reason=e, create_issue=True)
raise
return None
@classmethod
def render_template[RenderableT](
cls,
data: T,
template: NotificationTemplate[T],
provider: type[NotificationProvider[RenderableT]],
) -> RenderableT:
rendered_template = template.render(data=data)
renderer = provider.get_renderer(data=data, category=template.category)
return renderer.render(data=data, rendered_template=rendered_template)
def notify_async(
self,
*,
strategy: NotificationStrategy | None = None,
targets: list[NotificationTarget] | None = None,
) -> None:
"""
Send a notification directly to a target via task, if you care about using the result of the notification, use notify_sync instead.
"""
self._validate_strategy_and_targets(strategy=strategy, targets=targets)
targets = self._get_targets(strategy=strategy, targets=targets)
for target in targets:
serialized_target = NotificationTargetDto(target=target)
notify_target_async.delay(
data=self.data,
nested_target=serialized_target.to_dict(),
)
def notify_sync(
self,
*,
strategy: NotificationStrategy | None = None,
targets: list[NotificationTarget] | None = None,
) -> Mapping[NotificationProviderKey, list[str]]:
self._validate_strategy_and_targets(strategy=strategy, targets=targets)
targets = self._get_targets(strategy=strategy, targets=targets)
errors = defaultdict(list)
for target in targets:
try:
self.notify_target(target=target)
except IntegrationConfigurationError as e:
errors[target.provider_key].append(str(e))
except Exception as e:
sentry_sdk.capture_exception(e)
return errors
def _validate_strategy_and_targets(
self,
*,
strategy: NotificationStrategy | None = None,
targets: list[NotificationTarget] | None = None,
) -> None:
if not strategy and not targets:
raise NotificationServiceError(
"Must provide either a strategy or targets. Strategy is preferred."
)
if strategy and targets:
raise NotificationServiceError(
"Cannot provide both strategy and targets, only one is permitted. Strategy is preferred."
)
def _get_targets(
self,
*,
strategy: NotificationStrategy | None = None,
targets: list[NotificationTarget] | None = None,
) -> list[NotificationTarget]:
if strategy:
targets = strategy.get_targets()
if not targets:
logger.warning("Strategy '%s' did not yield targets", strategy.__class__.__name__)
return []
return targets
@instrumented_task(
name="src.sentry.notifications.platform.service.notify_target_async",
namespace=notifications_tasks,
processing_deadline_duration=30,
silo_mode=SiloMode.REGION,
)
def notify_target_async[T: NotificationData](
*,
data: T,
nested_target: dict[str, Any],
) -> None:
"""
Send a notification directly to a target asynchronously.
NOTE: This method ignores notification settings. When possible, consider using a strategy instead of
using this method directly to prevent unwanted noise associated with your notifications.
"""
lifecycle_metric = NotificationEventLifecycleMetric(
interaction_type=NotificationInteractionType.NOTIFY_TARGET_ASYNC,
notification_source=data.source,
)
with lifecycle_metric.capture() as lifecycle:
# Step 1: Deserialize the target from nested structure
serialized_target = NotificationTargetDto.from_dict(nested_target)
target = serialized_target.target
lifecycle_metric.notification_provider = target.provider_key
lifecycle.add_extras({"source": data.source, "target": target.to_dict()})
# Step 2: Get the provider, and validate the target against it
provider = provider_registry.get(target.provider_key)
provider.validate_target(target=target)
# Step 3: Render the template
template_cls = template_registry.get(data.source)
template = template_cls()
lifecycle_metric.notification_category = template.category
renderable = NotificationService.render_template(
data=data, template=template, provider=provider
)
# Step 4: Send the notification
try:
provider.send(target=target, renderable=renderable)
except IntegrationConfigurationError as e:
lifecycle.record_halt(halt_reason=e, create_issue=False)
except Exception as e:
lifecycle.record_failure(failure_reason=e, create_issue=True)
| NotificationService |
python | faif__python-patterns | patterns/behavioral/chaining_method.py | {
"start": 37,
"end": 242
} | class ____:
def __init__(self, name: str) -> None:
self.name = name
def do_action(self, action: Action) -> Action:
print(self.name, action.name, end=" ")
return action
| Person |
python | redis__redis-py | redis/multidb/command_executor.py | {
"start": 2037,
"end": 4428
} | class ____(CommandExecutor):
@property
@abstractmethod
def databases(self) -> Databases:
"""Returns a list of databases."""
pass
@property
@abstractmethod
def failure_detectors(self) -> List[FailureDetector]:
"""Returns a list of failure detectors."""
pass
@abstractmethod
def add_failure_detector(self, failure_detector: FailureDetector) -> None:
"""Adds a new failure detector to the list of failure detectors."""
pass
@property
@abstractmethod
def active_database(self) -> Optional[Database]:
"""Returns currently active database."""
pass
@active_database.setter
@abstractmethod
def active_database(self, database: SyncDatabase) -> None:
"""Sets the currently active database."""
pass
@property
@abstractmethod
def active_pubsub(self) -> Optional[PubSub]:
"""Returns currently active pubsub."""
pass
@active_pubsub.setter
@abstractmethod
def active_pubsub(self, pubsub: PubSub) -> None:
"""Sets currently active pubsub."""
pass
@property
@abstractmethod
def failover_strategy_executor(self) -> FailoverStrategyExecutor:
"""Returns failover strategy executor."""
pass
@property
@abstractmethod
def command_retry(self) -> Retry:
"""Returns command retry object."""
pass
@abstractmethod
def pubsub(self, **kwargs):
"""Initializes a PubSub object on a currently active database"""
pass
@abstractmethod
def execute_command(self, *args, **options):
"""Executes a command and returns the result."""
pass
@abstractmethod
def execute_pipeline(self, command_stack: tuple):
"""Executes a stack of commands in pipeline."""
pass
@abstractmethod
def execute_transaction(
self, transaction: Callable[[Pipeline], None], *watches, **options
):
"""Executes a transaction block wrapped in callback."""
pass
@abstractmethod
def execute_pubsub_method(self, method_name: str, *args, **kwargs):
"""Executes a given method on active pub/sub."""
pass
@abstractmethod
def execute_pubsub_run(self, sleep_time: float, **kwargs) -> Any:
"""Executes pub/sub run in a thread."""
pass
| SyncCommandExecutor |
python | getsentry__sentry | tests/sentry/integrations/slack/service/test_slack_service.py | {
"start": 2850,
"end": 21374
} | class ____(TestCase):
def setUp(self) -> None:
self.service = SlackService.default()
self.activity = Activity.objects.create(
group=self.group,
project=self.project,
type=ActivityType.SET_IGNORED.value,
user_id=self.user.id,
data={"ignoreUntilEscalating": True},
)
self.channel_id = "C0123456789"
self.message_identifier = "1a2s3d"
self.rule_action_uuid = str(uuid4())
self.notify_issue_owners_action = [
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
"uuid": self.rule_action_uuid,
}
]
self.rule = self.create_project_rule(
project=self.project, action_data=self.notify_issue_owners_action
)
self.rule_fire_history = RuleFireHistory.objects.create(
project=self.project,
rule=self.rule,
group=self.group,
event_id=456,
notification_uuid=str(uuid4()),
)
self.parent_notification = NotificationMessage.objects.create(
rule_fire_history_id=self.rule_fire_history.id,
rule_action_uuid=self.rule_action_uuid,
message_identifier=self.message_identifier,
)
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = self.create_integration(
organization=self.organization,
name="slack",
provider="slack",
external_id="slack:1",
metadata={"access_token": "xoxb-access-token"},
)
self.action = self.create_action()
self.parent_notification_action = NotificationMessage.objects.create(
message_identifier=self.message_identifier,
action=self.action,
group=self.group,
)
def test_none_group(self) -> None:
self.activity.update(group=None)
with mock.patch.object(self.service, "_logger") as mock_logger:
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_logger.debug.assert_called_with(
"no group associated on the activity, nothing to do",
extra={
"activity_id": self.activity.id,
"project_id": self.activity.project.id,
},
)
def test_none_user_id(self) -> None:
self.activity.update(user_id=None)
with mock.patch.object(self.service, "_logger") as mock_logger:
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_logger.debug.assert_called_with(
"machine/system updates are ignored at this time, nothing to do",
extra={
"activity_id": self.activity.id,
"project_id": self.activity.project.id,
"group_id": self.activity.group.id,
"organization_id": self.organization.id,
},
)
def test_disabled_option(self) -> None:
OrganizationOption.objects.set_value(
self.organization, "sentry:issue_alerts_thread_flag", False
)
with mock.patch.object(self.service, "_logger") as mock_logger:
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_logger.info.assert_called_with(
"feature is turned off for this organization",
extra={
"activity_id": self.activity.id,
"project_id": self.activity.project.id,
"group_id": self.activity.group.id,
"organization_id": self.organization.id,
},
)
def test_no_message_to_send(self) -> None:
# unsupported activity
self.activity.update(type=ActivityType.FIRST_SEEN.value)
with mock.patch.object(self.service, "_logger") as mock_logger:
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_logger.info.assert_called_with(
"notification to send is invalid",
extra={
"activity_id": self.activity.id,
"project_id": self.activity.project.id,
"group_id": self.activity.group.id,
"organization_id": self.organization.id,
},
)
def test_no_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
with mock.patch.object(self.service, "_logger") as mock_logger:
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_logger.info.assert_called_with(
"no integration found for activity",
extra={
"activity_id": self.activity.id,
"project_id": self.activity.project.id,
"group_id": self.activity.group.id,
"organization_id": self.organization.id,
},
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
@mock.patch(
"sentry.integrations.slack.service.SlackService._get_channel_id_from_parent_notification"
)
def test_calls_handle_parent_notification(
self, mock_get_channel_id, mock_send_notification, mock_record
):
IssueAlertNotificationMessage.from_model(instance=self.parent_notification)
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_get_channel_id.assert_called()
assert mock_get_channel_id.call_args.args[0].message_identifier == self.message_identifier
assert len(mock_record.mock_calls) == 4
start_1, end_1, start_2, end_2 = mock_record.mock_calls
assert start_1.args[0] == EventLifecycleOutcome.STARTED
assert start_2.args[0] == EventLifecycleOutcome.STARTED
assert end_1.args[0] == EventLifecycleOutcome.SUCCESS
assert end_2.args[0] == EventLifecycleOutcome.SUCCESS
@with_feature("organizations:slack-threads-refactor-uptime")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
@mock.patch(
"sentry.integrations.slack.service.SlackService._get_channel_id_from_parent_notification"
)
def test_handle_parent_notification_with_open_period(
self, mock_get_channel_id, mock_send_notification, mock_record
) -> None:
group = self.create_group(type=UptimeDomainCheckFailure.type_id)
activity = Activity.objects.create(
group=group,
project=self.project,
type=ActivityType.SET_IGNORED.value,
user_id=self.user.id,
data={"ignoreUntilEscalating": True},
)
rule_fire_history = RuleFireHistory.objects.create(
project=self.project,
rule=self.rule,
group=group,
event_id=456,
notification_uuid=str(uuid4()),
)
# Create two parent notifications with different open periods
NotificationMessage.objects.create(
id=123,
date_added=timezone.now(),
message_identifier=self.message_identifier,
rule_action_uuid=self.rule_action_uuid,
rule_fire_history=rule_fire_history,
open_period_start=timezone.now() - timedelta(minutes=1),
)
parent_notification_2_message = NotificationMessage.objects.create(
id=124,
date_added=timezone.now(),
message_identifier=self.message_identifier,
rule_action_uuid=self.rule_action_uuid,
rule_fire_history=rule_fire_history,
open_period_start=timezone.now(),
)
self.service.notify_all_threads_for_activity(activity=activity)
# Verify only one notification was handled
assert mock_send_notification.call_count == 1
# Verify it was the newer notification
mock_send_notification.assert_called_once()
assert mock_get_channel_id.call_args.args[0].id == parent_notification_2_message.id
@with_feature("organizations:slack-threads-refactor-uptime")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
@mock.patch(
"sentry.integrations.slack.service.SlackService._get_channel_id_from_parent_notification"
)
def test_handle_parent_notification_with_open_period_model_open_period_model(
self, mock_get_channel_id, mock_send_notification, mock_record
) -> None:
group = self.create_group(type=UptimeDomainCheckFailure.type_id)
activity = Activity.objects.create(
group=group,
project=self.project,
type=ActivityType.SET_IGNORED.value,
user_id=self.user.id,
data={"ignoreUntilEscalating": True},
)
rule_fire_history = RuleFireHistory.objects.create(
project=self.project,
rule=self.rule,
group=group,
event_id=456,
notification_uuid=str(uuid4()),
)
# Create two parent notifications with different open periods
NotificationMessage.objects.create(
id=123,
date_added=timezone.now(),
message_identifier=self.message_identifier,
rule_action_uuid=self.rule_action_uuid,
rule_fire_history=rule_fire_history,
open_period_start=timezone.now() - timedelta(minutes=1),
)
# Create a new open period
latest_open_period = get_latest_open_period(group)
parent_notification_2_message = NotificationMessage.objects.create(
id=124,
date_added=timezone.now(),
message_identifier=self.message_identifier,
rule_action_uuid=self.rule_action_uuid,
rule_fire_history=rule_fire_history,
open_period_start=latest_open_period.date_started if latest_open_period else None,
)
self.service.notify_all_threads_for_activity(activity=activity)
# Verify only one notification was handled
assert mock_send_notification.call_count == 1
# Verify it was the newer notification
mock_send_notification.assert_called_once()
assert mock_get_channel_id.call_args.args[0].id == parent_notification_2_message.id
@with_feature("organizations:slack-threads-refactor-uptime")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
@mock.patch(
"sentry.integrations.slack.service.SlackService._get_channel_id_from_parent_notification"
)
def test_handle_parent_notification_with_open_period_uptime_resolved(
self, mock_get_channel_id, mock_send_notification, mock_record
) -> None:
group = self.create_group(type=UptimeDomainCheckFailure.type_id)
activity = Activity.objects.create(
group=group,
project=self.project,
type=ActivityType.SET_RESOLVED.value,
user_id=None,
data={"ignoreUntilEscalating": True},
)
rule_fire_history = RuleFireHistory.objects.create(
project=self.project,
rule=self.rule,
group=group,
event_id=456,
notification_uuid=str(uuid4()),
)
# Create two parent notifications with different open periods
NotificationMessage.objects.create(
id=123,
date_added=timezone.now(),
message_identifier=self.message_identifier,
rule_action_uuid=self.rule_action_uuid,
rule_fire_history=rule_fire_history,
open_period_start=timezone.now() - timedelta(minutes=1),
)
parent_notification_2_message = NotificationMessage.objects.create(
id=124,
date_added=timezone.now(),
message_identifier=self.message_identifier,
rule_action_uuid=self.rule_action_uuid,
rule_fire_history=rule_fire_history,
open_period_start=timezone.now(),
)
self.service.notify_all_threads_for_activity(activity=activity)
# Verify only one notification was handled
assert mock_send_notification.call_count == 1
# Verify it was the newer notification for resolved activities
mock_send_notification.assert_called_once()
assert mock_get_channel_id.call_args.args[0].id == parent_notification_2_message.id
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
def test_no_parent_notification(self, mock_send: mock.MagicMock) -> None:
self.parent_notification.delete()
self.service.notify_all_threads_for_activity(activity=self.activity)
assert not mock_send.called
def test_none_user_id_uptime_resolved(self) -> None:
"""Test that uptime resolved notifications are allowed even without a user_id"""
self.group.update(type=UptimeDomainCheckFailure.type_id)
self.activity.update(
user_id=None,
type=ActivityType.SET_RESOLVED.value,
)
with mock.patch.object(self.service, "_notify_all_threads_for_activity") as mock_notify:
self.service.notify_all_threads_for_activity(activity=self.activity)
mock_notify.assert_called_once()
@with_feature("organizations:workflow-engine-trigger-actions")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
@mock.patch(
"sentry.integrations.slack.service.SlackService._get_channel_id_from_parent_notification_notification_action"
)
def test_handle_parent_notification_with_notification_action(
self, mock_get_channel_id, mock_send_notification, mock_record
):
"""Test that notification action repository is used when feature flag is enabled"""
activity = Activity.objects.create(
group=self.group,
project=self.project,
type=ActivityType.SET_IGNORED.value,
user_id=self.user.id,
data={"ignoreUntilEscalating": True},
)
NotificationActionNotificationMessage.from_model(
instance=self.parent_notification_action,
)
uuid = uuid4()
with mock.patch("uuid.uuid4", return_value=uuid):
self.service.notify_all_threads_for_activity(activity=activity)
group_link = self.group.get_absolute_url(
params={
"referrer": "activity_notification",
"notification_uuid": uuid,
}
)
# Verify the notification action repository was used
assert mock_send_notification.call_count == 1
mock_send_notification.assert_called_with(
channel_id=mock_get_channel_id.return_value,
message_identifier=self.message_identifier,
notification_to_send=f"admin@localhost archived <{group_link}|{self.group.qualified_short_id}>",
client=mock.ANY,
)
assert (
mock_get_channel_id.call_args.args[0].__class__ == NotificationActionNotificationMessage
)
@with_feature(
{
"organizations:workflow-engine-trigger-actions": True,
"organizations:slack-threads-refactor-uptime": True,
}
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.integrations.slack.service.SlackService._send_notification_to_slack_channel"
)
@mock.patch(
"sentry.integrations.slack.service.SlackService._get_channel_id_from_parent_notification_notification_action"
)
def test_handle_parent_notification_with_notification_action_and_open_period(
self, mock_get_channel_id, mock_send_notification, mock_record
):
"""Test that notification action repository is used with open period when both features are enabled"""
group = self.create_group(type=UptimeDomainCheckFailure.type_id)
activity = Activity.objects.create(
group=group,
project=self.project,
type=ActivityType.SET_IGNORED.value,
user_id=self.user.id,
data={"ignoreUntilEscalating": True},
)
# Create NotificationMessage model instances first
NotificationMessage.objects.create(
id=123,
date_added=timezone.now(),
message_identifier=self.message_identifier,
group=group,
action=self.action,
open_period_start=timezone.now() - timedelta(minutes=1),
)
notification_2 = NotificationMessage.objects.create(
id=124,
date_added=timezone.now(),
message_identifier=self.message_identifier,
group=group,
action=self.action,
open_period_start=timezone.now(),
)
# Convert to NotificationActionNotificationMessage objects
parent_notification_2_message = NotificationActionNotificationMessage.from_model(
notification_2
)
self.service.notify_all_threads_for_activity(activity=activity)
# Verify only one notification was handled
assert mock_send_notification.call_count == 1
# Verify it was the newer notification
mock_send_notification.assert_called_once()
assert mock_get_channel_id.call_args.args[0].id == parent_notification_2_message.id
assert (
mock_get_channel_id.call_args.args[0].__class__ == NotificationActionNotificationMessage
)
| TestNotifyAllThreadsForActivity |
python | huggingface__transformers | src/transformers/models/bert_generation/modeling_bert_generation.py | {
"start": 1742,
"end": 3497
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
| BertGenerationSelfOutput |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.