language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-poetry__poetry | src/poetry/mixology/version_solver.py | {
"start": 1005,
"end": 1486
} | class ____(IntEnum):
"""
Preference is one of the criteria for choosing which dependency to solve
first. A higher value means that there are "more options" to satisfy
a dependency. A lower value takes precedence.
"""
DIRECT_ORIGIN = 0
NO_CHOICE = 1
USE_LATEST = 2
LOCKED = 3
DEFAULT = 4
CompKey = tuple[Preference, int, bool, int]
DependencyCacheKey = tuple[
str, Optional[str], Optional[str], Optional[str], Optional[str]
]
| Preference |
python | pandas-dev__pandas | pandas/tests/indexes/categorical/test_category.py | {
"start": 351,
"end": 9816
} | class ____:
@pytest.fixture
def simple_index(self) -> CategoricalIndex:
"""
Fixture that provides a CategoricalIndex.
"""
return CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
def test_can_hold_identifiers(self):
idx = CategoricalIndex(list("aabbca"), categories=None, ordered=False)
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_insert(self, simple_index):
ci = simple_index
categories = ci.categories
# test 0th element
result = ci.insert(0, "a")
expected = CategoricalIndex(list("aaabbca"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test Nth element that follows Python list behavior
result = ci.insert(-1, "a")
expected = CategoricalIndex(list("aabbcaa"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test empty
result = CategoricalIndex([], categories=categories).insert(0, "a")
expected = CategoricalIndex(["a"], categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid -> cast to object
expected = ci.astype(object).insert(0, "d")
result = ci.insert(0, "d").astype(object)
tm.assert_index_equal(result, expected, exact=True)
# GH 18295 (test missing)
expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"])
for na in (np.nan, pd.NaT, None):
result = CategoricalIndex(list("aabcb")).insert(1, na)
tm.assert_index_equal(result, expected)
def test_insert_na_mismatched_dtype(self):
ci = CategoricalIndex([0, 1, 1])
result = ci.insert(0, pd.NaT)
expected = Index([pd.NaT, 0, 1, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_delete(self, simple_index):
ci = simple_index
categories = ci.categories
result = ci.delete(0)
expected = CategoricalIndex(list("abbca"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
result = ci.delete(-1)
expected = CategoricalIndex(list("aabbc"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
with tm.external_error_raised((IndexError, ValueError)):
# Either depending on NumPy version
ci.delete(10)
@pytest.mark.parametrize(
"data, non_lexsorted_data",
[[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]],
)
def test_is_monotonic(self, data, non_lexsorted_data):
c = CategoricalIndex(data)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
c = CategoricalIndex(data, ordered=True)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
c = CategoricalIndex(data, categories=reversed(data))
assert c.is_monotonic_increasing is False
assert c.is_monotonic_decreasing is True
c = CategoricalIndex(data, categories=reversed(data), ordered=True)
assert c.is_monotonic_increasing is False
assert c.is_monotonic_decreasing is True
# test when data is neither monotonic increasing nor decreasing
reordered_data = [data[0], data[2], data[1]]
c = CategoricalIndex(reordered_data, categories=reversed(data))
assert c.is_monotonic_increasing is False
assert c.is_monotonic_decreasing is False
# non lexsorted categories
categories = non_lexsorted_data
c = CategoricalIndex(categories[:2], categories=categories)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
c = CategoricalIndex(categories[1:3], categories=categories)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
def test_has_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name="foo")
assert idx.is_unique is False
assert idx.has_duplicates is True
idx = CategoricalIndex([None, None], categories=[2, 3], name="foo")
assert idx.is_unique is False
assert idx.has_duplicates is True
idx = CategoricalIndex([None, 1, 2, 3], categories=[1, 2, 3], name="foo")
assert idx.is_unique is True
assert idx.has_duplicates is False
@pytest.mark.parametrize(
"data, categories, expected",
[
(
[1, 1, 1],
[1, 2, 3],
{
"first": np.array([False, True, True]),
"last": np.array([True, True, False]),
False: np.array([True, True, True]),
},
),
(
[None, None, None],
list("abc"),
{
"first": np.array([False, True, True]),
"last": np.array([True, True, False]),
False: np.array([True, True, True]),
},
),
(
[None, "a", "b"],
list("abc"),
{
"first": np.zeros(shape=(3), dtype=np.bool_),
"last": np.zeros(shape=(3), dtype=np.bool_),
False: np.zeros(shape=(3), dtype=np.bool_),
},
),
(
list("abb"),
list("abc"),
{
"first": np.array([False, False, True]),
"last": np.array([False, True, False]),
False: np.array([False, True, True]),
},
),
],
)
def test_drop_duplicates(self, data, categories, expected):
idx = CategoricalIndex(data, categories=categories, name="foo")
for keep, e in expected.items():
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e)
e = idx[~e]
result = idx.drop_duplicates(keep=keep)
tm.assert_index_equal(result, e)
@pytest.mark.parametrize(
"data, categories, expected_data",
[
([1, 1, 1], [1, 2, 3], [1]),
([1, 1, 1], list("abc"), [np.nan]),
([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]),
([2, "a", "b"], list("abc"), [np.nan, "a", "b"]),
],
)
def test_unique(self, data, categories, expected_data, ordered):
dtype = CategoricalDtype(categories, ordered=ordered)
msg = "Constructing a Categorical with a dtype and values containing"
warn = None if expected_data == [1] else Pandas4Warning
with tm.assert_produces_warning(warn, match=msg):
idx = CategoricalIndex(data, dtype=dtype)
expected = CategoricalIndex(expected_data, dtype=dtype)
tm.assert_index_equal(idx.unique(), expected)
def test_repr_roundtrip(self):
ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
str(ci)
tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
# formatting
str(ci)
# long format
# this is not reprable
ci = CategoricalIndex(np.random.default_rng(2).integers(0, 5, size=100))
str(ci)
def test_isin(self):
ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])
tm.assert_numpy_array_equal(
ci.isin(["c"]), np.array([False, False, False, True, False, False])
)
tm.assert_numpy_array_equal(
ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False])
)
tm.assert_numpy_array_equal(
ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6)
)
# mismatched categorical -> coerced to ndarray so doesn't matter
result = ci.isin(ci.set_categories(list("abcdefghi")))
expected = np.array([True] * 6)
tm.assert_numpy_array_equal(result, expected)
result = ci.isin(ci.set_categories(list("defghi")))
expected = np.array([False] * 5 + [True])
tm.assert_numpy_array_equal(result, expected)
def test_isin_overlapping_intervals(self):
# GH 34974
idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)])
result = CategoricalIndex(idx).isin(idx)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)
assert ci1.identical(ci1)
assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
def test_ensure_copied_data(self):
# gh-12309: Check the "copy" argument of each
# Index.__new__ is honored.
#
# Must be tested separately from other indexes because
# self.values is not an ndarray.
index = CategoricalIndex(list("ab") * 5)
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
assert not np.shares_memory(result._data._codes, index._data._codes)
result = CategoricalIndex(index.values, copy=False)
assert result._data._codes is index._data._codes
| TestCategoricalIndex |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/nn_functional.py | {
"start": 16095,
"end": 19066
} | class ____(Operator):
"""Operator for torch.nn.functional.rms_norm (Root Mean Square Normalization).
RMSNorm is commonly used in modern LLMs like LLaMA. It normalizes by the RMS of the input.
"""
def __init__(self):
super().__init__("torch.nn.functional.rms_norm")
self.weight = 5.0
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nn.functional.rms_norm"
def can_produce(self, output_spec: Spec) -> bool:
"""RMSNorm can produce tensor outputs with floating point dtypes."""
if not isinstance(output_spec, TensorSpec):
return False
# RMSNorm needs at least 1 dimension to normalize over
if len(output_spec.size) == 0:
return False
return is_float_dtype(output_spec.dtype)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for RMSNorm operation.
RMSNorm requires:
- input: input tensor
- weight: (normalized_shape,) [optional]
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("RMSNormOperator can only produce TensorSpec outputs")
if len(output_spec.size) == 0:
raise ValueError("RMSNorm output must have at least 1 dimension")
# Input tensor has same shape and dtype as output
input_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
# Weight tensor (optional with 70% probability)
normalized_shape = output_spec.size[-1:]
specs = [input_spec]
if random.random() < 0.7:
weight_spec = TensorSpec(
size=normalized_shape, stride=(1,), dtype=output_spec.dtype
)
specs.append(weight_spec)
from typing import cast
return cast(list[Spec], specs)
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for RMSNorm operation."""
if len(input_names) < 1 or len(input_names) > 2:
raise ValueError("RMSNorm requires 1-2 inputs: input, optional weight")
if not isinstance(output_spec, TensorSpec):
raise ValueError("RMSNormOperator can only produce TensorSpec outputs")
target_dtype = str(output_spec.dtype)
input_name = input_names[0]
# Normalize over the last dimension
normalized_shape = f"({output_spec.size[-1]},)"
if len(input_names) == 1:
return f"{output_name} = torch.nn.functional.rms_norm({input_name}.to({target_dtype}), {normalized_shape})"
else: # len(input_names) == 2
weight_name = input_names[1]
return f"{output_name} = torch.nn.functional.rms_norm({input_name}.to({target_dtype}), {normalized_shape}, weight={weight_name}.to({target_dtype}))"
| RMSNormOperator |
python | coleifer__peewee | playhouse/migrate.py | {
"start": 16805,
"end": 17185
} | class ____(PostgresqlMigrator):
explicit_create_foreign_key = True
def add_inline_fk_sql(self, ctx, field):
pass
@operation
def drop_index(self, table, index_name):
return (self
.make_context()
.literal('DROP INDEX ')
.sql(Entity(index_name))
.literal(' CASCADE'))
| CockroachDBMigrator |
python | sqlalchemy__sqlalchemy | test/orm/test_naturalpks.py | {
"start": 25177,
"end": 29199
} | class ____(fixtures.MappedTest):
# mssql, mysql don't allow
# ON UPDATE on self-referential keys
__unsupported_on__ = ("mssql", "mysql", "mariadb")
__requires__ = ("on_update_or_deferrable_fks",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table(
"nodes",
metadata,
Column("name", String(50), primary_key=True),
Column("parent", String(50), ForeignKey("nodes.name", **fk_args)),
test_needs_fk=True,
)
@classmethod
def setup_classes(cls):
class Node(cls.Comparable):
pass
def test_one_to_many_on_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node,
backref=sa.orm.backref(
"parentnode",
remote_side=nodes.c.name,
passive_updates=False,
),
)
},
)
sess = fixture_session(future=True)
n1 = Node(name="n1")
sess.add(n1)
n2 = Node(name="n11", parentnode=n1)
n3 = Node(name="n12", parentnode=n1)
n4 = Node(name="n13", parentnode=n1)
sess.add_all([n2, n3, n4])
sess.commit()
n1.name = "new n1"
sess.commit()
eq_(
["new n1", "new n1", "new n1"],
[
n.parent
for n in sess.query(Node).filter(
Node.name.in_(["n11", "n12", "n13"])
)
],
)
def test_one_to_many_on_o2m(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node,
backref=sa.orm.backref(
"parentnode", remote_side=nodes.c.name
),
passive_updates=False,
)
},
)
sess = fixture_session()
n1 = Node(name="n1")
n1.children.append(Node(name="n11"))
n1.children.append(Node(name="n12"))
n1.children.append(Node(name="n13"))
sess.add(n1)
sess.commit()
n1.name = "new n1"
sess.commit()
eq_(n1.children[1].parent, "new n1")
eq_(
["new n1", "new n1", "new n1"],
[
n.parent
for n in sess.query(Node).filter(
Node.name.in_(["n11", "n12", "n13"])
)
],
)
@testing.requires.on_update_cascade
def test_many_to_one_passive(self):
self._test_many_to_one(True)
def test_many_to_one_nonpassive(self):
self._test_many_to_one(False)
def _test_many_to_one(self, passive):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"parentnode": relationship(
Node, remote_side=nodes.c.name, passive_updates=passive
)
},
)
sess = fixture_session()
n1 = Node(name="n1")
n11 = Node(name="n11", parentnode=n1)
n12 = Node(name="n12", parentnode=n1)
n13 = Node(name="n13", parentnode=n1)
sess.add_all([n1, n11, n12, n13])
sess.commit()
n1.name = "new n1"
sess.commit()
eq_(
["new n1", "new n1", "new n1"],
[
n.parent
for n in sess.query(Node).filter(
Node.name.in_(["n11", "n12", "n13"])
)
],
)
| SelfReferentialTest |
python | great-expectations__great_expectations | contrib/cli/great_expectations_contrib/package.py | {
"start": 1045,
"end": 1209
} | class ____(str, Enum):
TWITTER = "TWITTER"
GITHUB = "GITHUB"
LINKEDIN = "LINKEDIN"
MEDIUM = "MEDIUM"
WEBSITE = "WEBSITE"
@dataclass
| SocialLinkType |
python | apache__airflow | dev/breeze/tests/test_ui_commands.py | {
"start": 2379,
"end": 3161
} | class ____:
def test_flatten_simple_dict(self):
data = {"key1": "value1", "key2": "value2"}
keys = flatten_keys(data)
assert set(keys) == {"key1", "key2"}
def test_flatten_nested_dict(self):
data = {"parent": {"child1": "value1", "child2": "value2"}}
keys = flatten_keys(data)
assert set(keys) == {"parent.child1", "parent.child2"}
def test_flatten_deeply_nested_dict(self):
data = {"level1": {"level2": {"level3": "value"}}}
keys = flatten_keys(data)
assert keys == ["level1.level2.level3"]
def test_flatten_mixed_dict(self):
data = {"simple": "value", "nested": {"key": "value2"}}
keys = flatten_keys(data)
assert set(keys) == {"simple", "nested.key"}
| TestFlattenKeys |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/choice.py | {
"start": 964,
"end": 1116
} | class ____(TypedDict):
min_value: int | None
max_value: int | None
weights: dict[int, float] | None
shrink_towards: int
| IntegerConstraints |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 262777,
"end": 264320
} | class ____:
def test_extract_nc4_variable_encoding(self) -> None:
var = xr.Variable(("x",), [1, 2, 3], {}, {"foo": "bar"})
with pytest.raises(ValueError, match=r"unexpected encoding"):
_extract_nc4_variable_encoding(var, raise_on_invalid=True)
var = xr.Variable(("x",), [1, 2, 3], {}, {"chunking": (2, 1)})
encoding = _extract_nc4_variable_encoding(var)
assert {} == encoding
# regression test
var = xr.Variable(("x",), [1, 2, 3], {}, {"shuffle": True})
encoding = _extract_nc4_variable_encoding(var, raise_on_invalid=True)
assert {"shuffle": True} == encoding
# Variables with unlim dims must be chunked on output.
var = xr.Variable(("x",), [1, 2, 3], {}, {"contiguous": True})
encoding = _extract_nc4_variable_encoding(var, unlimited_dims=("x",))
assert {} == encoding
@requires_netCDF4
def test_extract_nc4_variable_encoding_netcdf4(self):
# New netCDF4 1.6.0 compression argument.
var = xr.Variable(("x",), [1, 2, 3], {}, {"compression": "szlib"})
_extract_nc4_variable_encoding(var, backend="netCDF4", raise_on_invalid=True)
@pytest.mark.xfail
def test_extract_h5nc_encoding(self) -> None:
# not supported with h5netcdf (yet)
var = xr.Variable(("x",), [1, 2, 3], {}, {"least_significant_digit": 2})
with pytest.raises(ValueError, match=r"unexpected encoding"):
_extract_nc4_variable_encoding(var, raise_on_invalid=True)
| TestEncodingInvalid |
python | pytorch__pytorch | torch/optim/rprop.py | {
"start": 555,
"end": 17717
} | class ____(Optimizer): # noqa: D101
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1e-2,
etas: tuple[float, float] = (0.5, 1.2),
step_sizes: tuple[float, float] = (1e-6, 50),
*,
capturable: bool = False,
foreach: Optional[bool] = None,
maximize: bool = False,
differentiable: bool = False,
) -> None: # noqa: D107
if isinstance(lr, Tensor) and lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 < etas[0] < 1.0 < etas[1]:
raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}")
defaults = {
"lr": lr,
"etas": etas,
"step_sizes": step_sizes,
"foreach": foreach,
"maximize": maximize,
"differentiable": differentiable,
"capturable": capturable,
}
super().__init__(params, defaults)
def __setstate__(self, state): # noqa: D105
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("foreach", None)
group.setdefault("maximize", False)
group.setdefault("differentiable", False)
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
p_state["step"] = (
torch.tensor(
step_val, dtype=_get_scalar_dtype(), device=p.device
)
if group["capturable"]
else torch.tensor(step_val, dtype=_get_scalar_dtype())
)
def _init_group(self, group, params, grads, prevs, step_sizes, state_steps):
has_complex = False
for p in group["params"]:
if p.grad is None:
continue
has_complex |= torch.is_complex(p)
params.append(p)
grad = p.grad
if grad.is_sparse:
raise RuntimeError("Rprop does not support sparse gradients")
grads.append(grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = (
torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
if group["capturable"]
else torch.zeros((), dtype=_get_scalar_dtype())
)
state["prev"] = torch.zeros_like(p, memory_format=torch.preserve_format)
if p.dtype.is_complex:
# Complex Number should be as if they are two independent real numbers.
# Hence the step_size shouldn't be zero for imaginary part.
state["step_size"] = torch.full_like(
grad, complex(group["lr"], group["lr"])
)
else:
state["step_size"] = torch.full_like(grad, _to_scalar(group["lr"]))
prevs.append(state["prev"])
step_sizes.append(state["step_size"])
state_steps.append(state["step"])
return has_complex
@_use_grad_for_differentiable
def step(self, closure=None):
"""Perform a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params: list[Tensor] = []
grads: list[Tensor] = []
prevs: list[Tensor] = []
step_sizes: list[Tensor] = []
state_steps: list[Tensor] = []
etaminus, etaplus = group["etas"]
step_size_min, step_size_max = group["step_sizes"]
foreach = group["foreach"]
maximize = group["maximize"]
has_complex = self._init_group(
group, params, grads, prevs, step_sizes, state_steps
)
rprop(
params,
grads,
prevs,
step_sizes,
state_steps,
step_size_min=step_size_min,
step_size_max=step_size_max,
etaminus=etaminus,
etaplus=etaplus,
foreach=foreach,
maximize=maximize,
differentiable=group["differentiable"],
capturable=group["capturable"],
has_complex=has_complex,
)
return loss
Rprop.__doc__ = (
r"""Implements the resilient backpropagation algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta)
\text{ (objective)}, \\
&\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
\text{ (step sizes)} \\
&\textbf{initialize} : g^0_{prev} \leftarrow 0,
\: \eta_0 \leftarrow \text{lr (learning rate)} \\
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\
&\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\
&\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
\Gamma_{max}) \\
&\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\
&\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
\Gamma_{min}) \\
&\hspace{15mm} g^i_t \leftarrow 0 \\
&\hspace{10mm} \textbf{else} \: \\
&\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\
&\hspace{5mm}g_{prev} \leftarrow g_t \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to the paper
`A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.""" # codespell:ignore
+ rf"""
Args:
{_params_doc}
lr (float, optional): learning rate (default: 1e-2)
etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that
are multiplicative increase and decrease factors
(default: (0.5, 1.2))
step_sizes (Tuple[float, float], optional): a pair of minimal and
maximal allowed step sizes (default: (1e-6, 50))
{_capturable_doc}
{_foreach_doc}
{_maximize_doc}
{_differentiable_doc}
"""
)
def _single_tensor_rprop(
params: list[Tensor],
grads: list[Tensor],
prevs: list[Tensor],
step_sizes: list[Tensor],
state_steps: list[Tensor],
*,
step_size_min: float,
step_size_max: float,
etaminus: float,
etaplus: float,
maximize: bool,
capturable: bool,
differentiable: bool,
has_complex: bool,
) -> None:
for i, param in enumerate(params):
grad = grads[i]
grad = grad if not maximize else -grad
prev = prevs[i]
step_size = step_sizes[i]
step = state_steps[i]
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch.compiler.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices()
if not (
param.device.type == step.device.type
and param.device.type in capturable_supported_devices
):
raise AssertionError(
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
)
step += 1
if torch.is_complex(param):
grad = torch.view_as_real(grad)
prev = torch.view_as_real(prev)
param = torch.view_as_real(param)
step_size = torch.view_as_real(step_size)
if differentiable:
sign = grad.mul(prev.clone()).sign()
else:
sign = grad.mul(prev).sign()
if capturable:
sign.copy_(torch.where(sign.gt(0), etaplus, sign))
sign.copy_(torch.where(sign.lt(0), etaminus, sign))
sign.copy_(torch.where(sign.eq(0), 1, sign))
else:
sign[sign.gt(0)] = etaplus
sign[sign.lt(0)] = etaminus
sign[sign.eq(0)] = 1
# update stepsizes with step size updates
step_size.mul_(sign).clamp_(step_size_min, step_size_max)
# for dir<0, dfdx=0
# for dir>=0 dfdx=dfdx
grad = grad.clone(memory_format=torch.preserve_format)
if capturable:
grad.copy_(torch.where(sign.eq(etaminus), 0, grad))
else:
grad[sign.eq(etaminus)] = 0
# update parameters
param.addcmul_(grad.sign(), step_size, value=-1)
prev.copy_(grad)
def _multi_tensor_rprop(
params: list[Tensor],
grads: list[Tensor],
prevs: list[Tensor],
step_sizes: list[Tensor],
state_steps: list[Tensor],
*,
step_size_min: float,
step_size_max: float,
etaminus: float,
etaplus: float,
maximize: bool,
capturable: bool,
differentiable: bool,
has_complex: bool,
) -> None:
if len(params) == 0:
return
if differentiable:
raise AssertionError("_foreach ops don't support autograd")
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch.compiler.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices()
if not all(
p.device.type == step.device.type
and p.device.type in capturable_supported_devices
for p, step in zip(params, state_steps, strict=True)
):
raise AssertionError(
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
)
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, prevs, step_sizes, state_steps] # type: ignore[list-item]
)
for (
grouped_params_,
grouped_grads_,
grouped_prevs_,
grouped_step_sizes_,
grouped_state_steps_,
), _ in grouped_tensors.values():
grouped_params = cast(list[Tensor], grouped_params_)
grouped_grads = cast(list[Tensor], grouped_grads_)
grouped_prevs = cast(list[Tensor], grouped_prevs_)
grouped_step_sizes = cast(list[Tensor], grouped_step_sizes_)
grouped_state_steps = cast(list[Tensor], grouped_state_steps_)
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
torch._foreach_add_(
grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
)
else:
torch._foreach_add_(grouped_state_steps, 1)
# Handle complex params
if has_complex:
_view_as_real(
grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes
)
signs = torch._foreach_mul(grouped_grads, grouped_prevs)
if maximize:
torch._foreach_neg_(signs)
# At the end of the step, grouped_prevs will contain the current grads, so we reuse
# grouped_prevs memory instead of creating a new buffer, but, for clarity, we reassign
# to keep referring to the buffer as grouped_grads.
torch._foreach_copy_(grouped_prevs, grouped_grads)
if maximize:
torch._foreach_neg_(grouped_prevs)
grouped_grads = grouped_prevs
torch._foreach_sign_(signs)
if capturable:
for sign in signs:
sign.copy_(torch.where(sign.gt(0), etaplus, sign))
sign.copy_(torch.where(sign.lt(0), etaminus, sign))
sign.copy_(torch.where(sign.eq(0), 1, sign))
else:
for sign in signs:
sign[sign.gt(0)] = etaplus
sign[sign.lt(0)] = etaminus
sign[sign.eq(0)] = 1
# update stepsizes with step size updates
torch._foreach_mul_(grouped_step_sizes, signs)
for step_size in grouped_step_sizes:
step_size.clamp_(step_size_min, step_size_max)
# for dir<0, dfdx=0
# for dir>=0 dfdx=dfdx
grouped_grads = list(grouped_grads)
for i in range(len(grouped_grads)):
grouped_grads[i].copy_(
torch.where(signs[i].eq(etaminus), 0, grouped_grads[i])
)
# explicitly del signs as it's not used after here to save memory
del signs
# update parameters
grad_signs = [grad.sign() for grad in grouped_grads]
torch._foreach_addcmul_(
grouped_params, grad_signs, grouped_step_sizes, value=-1
)
# Logically, you may expect grouped_prevs to get updated to grouped_grads, but that's
# basically already happened since we've been using grouped_prevs' memory to store
# updated grouped_grads!
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rprop)
def rprop(
params: list[Tensor],
grads: list[Tensor],
prevs: list[Tensor],
step_sizes: list[Tensor],
state_steps: list[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach: Optional[bool] = None,
capturable: bool = False,
maximize: bool = False,
differentiable: bool = False,
has_complex: bool = False,
*,
step_size_min: float,
step_size_max: float,
etaminus: float,
etaplus: float,
) -> None:
r"""Functional API that performs rprop algorithm computation.
See :class:`~torch.optim.Rprop` for details.
"""
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
if not torch.compiler.is_compiling() and not all(
isinstance(t, torch.Tensor) for t in state_steps
):
raise RuntimeError(
"API has changed, `state_steps` argument must contain a list of singleton tensors"
)
if foreach is None:
_, foreach = _default_to_fused_or_foreach(
params, differentiable, use_fused=False
)
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_rprop
else:
func = _single_tensor_rprop
func(
params,
grads,
prevs,
step_sizes,
state_steps,
step_size_min=step_size_min,
step_size_max=step_size_max,
etaminus=etaminus,
etaplus=etaplus,
capturable=capturable,
maximize=maximize,
differentiable=differentiable,
has_complex=has_complex,
)
| Rprop |
python | bokeh__bokeh | src/bokeh/core/property/alias.py | {
"start": 2639,
"end": 3666
} | class ____(Alias[T]):
"""
Alias of another property of a model showing a deprecation message when used.
"""
def __init__(self, aliased_name: str, *, since: Version,
extra: str | None = None, help: str | None = None) -> None:
super().__init__(aliased_name, help=help)
self.since = since
self.extra = extra
def make_descriptors(self, base_name: str) -> list[PropertyDescriptor[T]]:
return [ DeprecatedAliasPropertyDescriptor(base_name, self) ]
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| DeprecatedAlias |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py | {
"start": 12237,
"end": 12318
} | class ____(SpaceToBatchSpaceToDepth, CppOpImpl):
pass
| SpaceToBatchSpaceToDepthCpp |
python | wandb__wandb | wandb/vendor/pygments/lexers/haskell.py | {
"start": 13366,
"end": 18941
} | class ____(RegexLexer):
"""
FIXME: A Cryptol2 lexer based on the lexemes defined in the Haskell 98 Report.
.. versionadded:: 2.0
"""
name = 'Cryptol'
aliases = ['cryptol', 'cry']
filenames = ['*.cry']
mimetypes = ['text/x-cryptol']
reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else',
'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2',
'max', 'min', 'module', 'newtype', 'pragma', 'property',
'then', 'type', 'where', 'width')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
# (r'--\s*|.*$', Comment.Doc),
(r'//.*$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r"'?[_a-z][\w']*", Name),
(r"('')?[A-Z][\w\']*", Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[\w.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z]\w*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
# TODO: these don't match the comments in docs, remove.
#(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
#(r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
'comment': [
# Multiline Comments
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
EXTRA_KEYWORDS = set(('join', 'split', 'reverse', 'transpose', 'width',
'length', 'tail', '<<', '>>', '<<<', '>>>', 'const',
'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error',
'trace'))
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Name.Builtin, value
else:
yield index, token, value
| CryptolLexer |
python | bokeh__bokeh | src/bokeh/models/widgets/groups.py | {
"start": 3718,
"end": 4077
} | class ____(ToggleButtonGroup):
''' A group of check boxes rendered as toggle buttons.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
active = List(Int, help="""
The list of indices of selected check boxes.
""")
| CheckboxButtonGroup |
python | spack__spack | lib/spack/spack/builder.py | {
"start": 25579,
"end": 27350
} | class ____(spack.package_base.PackageBase):
"""Build system base class for packages that do not use a specific build system. It adds the
``build_system=generic`` variant to the package.
This is the only build system base class defined in Spack core. All other build systems
are defined in the builtin package repository :mod:`spack_repo.builtin.build_systems`.
The associated builder is :class:`GenericBuilder`, which is only necessary when the package
has multiple build systems.
Example::
from spack.package import *
class MyPackage(Package):
\"\"\"A package that does not use a specific build system.\"\"\"
homepage = "https://example.com/mypackage"
url = "https://example.com/mypackage-1.0.tar.gz"
version("1.0", sha256="...")
def install(self, spec: Spec, prefix: Prefix) -> None:
# Custom installation logic here
pass
.. note::
The difference between :class:`Package` and :class:`~spack.package_base.PackageBase` is that
:class:`~spack.package_base.PackageBase` is the universal base class for all package
classes, no matter their build system.
The :class:`Package` class is a *build system base class*, similar to
``CMakePackage``, and ``AutotoolsPackage``. It is called ``Package`` and not
``GenericPackage`` for legacy reasons.
"""
#: This attribute is used in UI queries that require to know which
#: build-system class we are using
build_system_class = "Package"
#: Legacy buildsystem attribute used to deserialize and install old specs
default_buildsystem = "generic"
spack.directives.build_system("generic")
@register_builder("generic")
| Package |
python | wandb__wandb | wandb/integration/openai/resolver.py | {
"start": 309,
"end": 466
} | class ____:
elapsed_time: float = None
prompt_tokens: int = None
completion_tokens: int = None
total_tokens: int = None
@dataclass
| UsageMetrics |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/resources.py | {
"start": 18346,
"end": 28312
} | class ____(ConfigurableResource):
"""This class represents a Airbyte workspace and provides utilities
to interact with Airbyte APIs.
"""
request_max_retries: int = Field(
default=3,
description=(
"The maximum number of times requests to the Airbyte API should be retried "
"before failing."
),
)
request_retry_delay: float = Field(
default=0.25,
description="Time (in seconds) to wait between each request retry.",
)
request_timeout: int = Field(
default=15,
description="Time (in seconds) after which the requests to Airbyte are declared timed out.",
)
max_items_per_page: int = Field(
default=100,
description=(
"The maximum number of items per page. "
"Used for paginated resources like connections, destinations, etc. "
),
)
poll_interval: float = Field(
default=DEFAULT_POLL_INTERVAL_SECONDS,
description="The time (in seconds) that will be waited between successive polls.",
)
poll_timeout: Optional[float] = Field(
default=None,
description=(
"The maximum time that will wait before this operation is timed "
"out. By default, this will never time out."
),
)
cancel_on_termination: bool = Field(
default=True,
description=(
"Whether to cancel a sync in Airbyte if the Dagster runner is terminated. "
"This may be useful to disable if using Airbyte sources that cannot be cancelled and "
"resumed easily, or if your Dagster deployment may experience runner interruptions "
"that do not impact your Airbyte deployment."
),
)
poll_previous_running_sync: bool = Field(
default=False,
description=(
"If set to True, Dagster will check for previous running sync for the same connection "
"and begin polling it instead of starting a new sync."
),
)
_client: AirbyteClient = PrivateAttr(default=None) # type: ignore
@cached_method
def fetch_airbyte_workspace_data(
self,
) -> AirbyteWorkspaceData:
"""Retrieves all Airbyte content from the workspace and returns it as a AirbyteWorkspaceData object.
Returns:
AirbyteWorkspaceData: A snapshot of the Airbyte workspace's content.
"""
connections_by_id = {}
destinations_by_id = {}
client = self.get_client()
client.validate_workspace_id()
connections = client.get_connections()
for partial_connection_details in connections:
full_connection_details = client.get_connection_details(
connection_id=partial_connection_details["connectionId"]
)
connection = AirbyteConnection.from_connection_details(
connection_details=full_connection_details
)
connections_by_id[connection.id] = connection
destination_details = client.get_destination_details(
destination_id=connection.destination_id
)
destination = AirbyteDestination.from_destination_details(
destination_details=destination_details
)
destinations_by_id[destination.id] = destination
return AirbyteWorkspaceData(
connections_by_id=connections_by_id,
destinations_by_id=destinations_by_id,
)
@cached_method
def load_asset_specs(
self,
dagster_airbyte_translator: Optional[DagsterAirbyteTranslator] = None,
connection_selector_fn: Optional[Callable[[AirbyteConnection], bool]] = None,
) -> Sequence[AssetSpec]:
"""Returns a list of AssetSpecs representing the Airbyte content in the workspace.
Args:
dagster_airbyte_translator (Optional[DagsterAirbyteTranslator], optional): The translator to use
to convert Airbyte content into :py:class:`dagster.AssetSpec`.
Defaults to :py:class:`DagsterAirbyteTranslator`.
connection_selector_fn (Optional[Callable[[AirbyteConnection], bool]]): A function that allows for filtering
which Airbyte connection assets are created for.
Returns:
List[AssetSpec]: The set of assets representing the Airbyte content in the workspace.
Examples:
Loading the asset specs for a given Airbyte workspace:
.. code-block:: python
from dagster_airbyte import AirbyteWorkspace
import dagster as dg
airbyte_workspace = AirbyteWorkspace(
workspace_id=dg.EnvVar("AIRBYTE_WORKSPACE_ID"),
client_id=dg.EnvVar("AIRBYTE_CLIENT_ID"),
client_secret=dg.EnvVar("AIRBYTE_CLIENT_SECRET"),
)
airbyte_specs = airbyte_workspace.load_asset_specs()
dg.Definitions(assets=airbyte_specs, resources={"airbyte": airbyte_workspace})
"""
dagster_airbyte_translator = dagster_airbyte_translator or DagsterAirbyteTranslator()
return load_airbyte_asset_specs(
workspace=self,
dagster_airbyte_translator=dagster_airbyte_translator,
connection_selector_fn=connection_selector_fn,
)
def _generate_materialization(
self,
airbyte_output: AirbyteOutput,
dagster_airbyte_translator: DagsterAirbyteTranslator,
):
connection = AirbyteConnection.from_connection_details(
connection_details=airbyte_output.connection_details
)
for stream in connection.streams.values():
if stream.selected:
connection_table_name = get_airbyte_connection_table_name(
stream_prefix=connection.stream_prefix,
stream_name=stream.name,
)
stream_asset_spec = dagster_airbyte_translator.get_asset_spec(
props=AirbyteConnectionTableProps(
table_name=connection_table_name,
stream_prefix=connection.stream_prefix,
stream_name=stream.name,
json_schema=stream.json_schema,
connection_id=connection.id,
connection_name=connection.name,
destination_type=None,
database=None,
schema=None,
)
)
yield AssetMaterialization(
asset_key=stream_asset_spec.key,
description=(
f"Table generated via Airbyte sync "
f"for connection {connection.name}: {connection_table_name}"
),
metadata=stream_asset_spec.metadata,
)
@public
@beta
def sync_and_poll(self, context: AssetExecutionContext):
"""Executes a sync and poll process to materialize Airbyte assets.
This method can only be used in the context of an asset execution.
Args:
context (AssetExecutionContext): The execution context
from within `@airbyte_assets`.
Returns:
Iterator[Union[AssetMaterialization, MaterializeResult]]: An iterator of MaterializeResult
or AssetMaterialization.
"""
assets_def = context.assets_def
dagster_airbyte_translator = get_translator_from_airbyte_assets(assets_def)
connection_id = next(
check.not_none(AirbyteMetadataSet.extract(spec.metadata).connection_id)
for spec in assets_def.specs
)
client = self.get_client()
airbyte_output = client.sync_and_poll(
connection_id=connection_id,
)
materialized_asset_keys = set()
for materialization in self._generate_materialization(
airbyte_output=airbyte_output, dagster_airbyte_translator=dagster_airbyte_translator
):
# Scan through all tables actually created, if it was expected then emit a MaterializeResult.
# Otherwise, emit a runtime AssetMaterialization.
if materialization.asset_key in context.selected_asset_keys:
yield MaterializeResult(
asset_key=materialization.asset_key, metadata=materialization.metadata
)
materialized_asset_keys.add(materialization.asset_key)
else:
context.log.warning(
f"An unexpected asset was materialized: {materialization.asset_key}. "
f"Yielding a materialization event."
)
yield materialization
unmaterialized_asset_keys = context.selected_asset_keys - materialized_asset_keys
if unmaterialized_asset_keys:
context.log.warning(f"Assets were not materialized: {unmaterialized_asset_keys}")
@contextmanager
def process_config_and_initialize_cm_cached(self) -> Iterator["AirbyteWorkspace"]:
# Hack to avoid reconstructing initialized copies of this resource, which invalidates
# @cached_method caches. This means that multiple calls to load_airbyte_asset_specs
# will not trigger multiple API calls to fetch the workspace data.
# Bespoke impl since @cached_method doesn't play nice with iterators; it's exhausted after
# the first call.
if hasattr(self, "_initialized"):
yield getattr(self, "_initialized")
else:
with self.process_config_and_initialize_cm() as initialized_workspace:
initialized = initialized_workspace
setattr(self, "_initialized", initialized)
yield initialized
@beta
| BaseAirbyteWorkspace |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 14962,
"end": 17399
} | class ____(SphinxDirective):
"""Directive to mark description of a new module."""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {
'platform': lambda x: x,
'synopsis': lambda x: x,
'no-index': directives.flag,
'no-index-entry': directives.flag,
'no-contents-entry': directives.flag,
'no-typesetting': directives.flag,
'noindex': directives.flag,
'nocontentsentry': directives.flag,
'deprecated': directives.flag,
}
def run(self) -> list[Node]:
# Copy old option names to new ones
# xref RemovedInSphinx90Warning
# # deprecate noindex in Sphinx 9.0
if 'no-index' not in self.options and 'noindex' in self.options:
self.options['no-index'] = self.options['noindex']
domain = self.env.domains.python_domain
modname = self.arguments[0].strip()
no_index = 'no-index' in self.options
self.env.ref_context['py:module'] = modname
content_nodes = self.parse_content_to_nodes(allow_section_headings=True)
ret: list[Node] = []
if not no_index:
# note module to the domain
node_id = make_id(self.env, self.state.document, 'module', modname)
target = nodes.target('', '', ids=[node_id], ismod=True)
self.set_source_info(target)
self.state.document.note_explicit_target(target)
domain.note_module(
name=modname,
node_id=node_id,
synopsis=self.options.get('synopsis', ''),
platform=self.options.get('platform', ''),
deprecated='deprecated' in self.options,
)
domain.note_object(modname, 'module', node_id, location=target)
# the platform and synopsis aren't printed; in fact, they are only
# used in the modindex currently
if 'no-index-entry' not in self.options:
index_text = f'module; {modname}'
inode = addnodes.index(
entries=[('pair', index_text, node_id, '', None)]
)
# The node order is: index node first, then target node.
ret.append(inode)
ret.append(target)
ret.extend(content_nodes)
return ret
| PyModule |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_pool_test.py | {
"start": 46440,
"end": 47525
} | class ____(object):
def __init__(self, number, extended_type):
self.number = number
self.extended_type = extended_type
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.extensions_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(index, field_desc.index)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertTrue(field_desc.is_extension)
test.assertEqual(msg_desc, field_desc.extension_scope)
test.assertEqual(msg_desc, field_desc.message_type)
test.assertEqual(self.extended_type, field_desc.containing_type.name)
test.assertEqual(file_desc, field_desc.file)
@testing_refleaks.TestCase
| ExtensionField |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum1.py | {
"start": 4546,
"end": 5129
} | class ____(Enum):
_other1: int
_other2: int
def __new__(cls, value: str, other1: int, other2: int):
obj = object.__new__(cls)
obj._value_ = value
obj._other1 = other1
obj._other2 = other2
return obj
A = ("a", 1, 2)
B = ("b", 2, 3)
te9_A = TestEnum9.A
reveal_type(te9_A, expected_text="Literal[TestEnum9.A]")
reveal_type(te9_A.value, expected_text="Any")
reveal_type(te9_A._value_, expected_text="Any")
reveal_type(te9_A.name, expected_text="Literal['A']")
reveal_type(te9_A._name_, expected_text="Literal['A']")
| TestEnum9 |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_releases.py | {
"start": 2153,
"end": 3410
} | class ____(BokehDirective):
def run(self):
srcdir = self.env.app.srcdir
versions = [x.rstrip(".rst") for x in listdir(join(srcdir, "docs", "releases")) if x.endswith(".rst")]
versions.sort(key=V, reverse=True)
rst = []
for version in versions:
try:
hashes = get_sri_hashes_for_version(version)
table = sorted(hashes.items())
except ValueError:
if version == __version__:
raise RuntimeError(f"Missing SRI Hash for full release version {version!r}")
table = []
rst.append(RELEASE_DETAIL.render(version=version, table=table))
return self.parse("\n".join(rst), "<bokeh-releases>")
def setup(app):
""" Required Sphinx extension setup function. """
app.add_directive("bokeh-releases", BokehReleases)
return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| BokehReleases |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 2002,
"end": 3139
} | class ____(torch.autograd.Function):
# Note that forward, setup_context, and backward are @staticmethods
@staticmethod
def forward(input, weight, bias):
output = input.mm(weight.t())
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
return output
@staticmethod
# inputs is a Tuple of all of the inputs passed to forward.
# output is the output of the forward().
def setup_context(ctx, inputs, output):
input, weight, bias = inputs
ctx.save_for_backward(input, weight, bias)
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
input, weight, bias = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(weight)
if ctx.needs_input_grad[1]:
grad_weight = grad_output.t().mm(input)
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0)
return grad_input, grad_weight, grad_bias
| LinearFunction |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/dfa/lstar.py | {
"start": 3644,
"end": 4648
} | class ____:
"""Relevant information for a state that we have witnessed as definitely
distinct from ones we have previously seen so far."""
# Index of this state in the learner's list of states
index: int
# A string that witnesses this state (i.e. when starting from the origin
# and following this string you will end up in this state).
label: str
# A boolean as to whether this is an accepting state.
accepting: bool
# A list of experiments that it is necessary to run to determine whether
# a string is in this state. This is stored as a dict mapping experiments
# to their expected result. A string is only considered to lead to this
# state if ``all(learner.member(s + experiment) == result for experiment,
# result in self.experiments.items())``.
experiments: dict
# A cache of transitions out of this state, mapping bytes to the states
# that they lead to.
transitions: dict = field(default_factory=dict)
| DistinguishedState |
python | HypothesisWorks__hypothesis | whole_repo_tests/types/revealed_types.py | {
"start": 2069,
"end": 5971
} | class ____(NamedTuple):
value: str
mypy: str
pyright: str
DIFF_REVEALED_TYPES = [
DifferingRevealedTypes("none() | integers()", "None | int", "int | None"),
DifferingRevealedTypes(
"data()", "hypothesis.strategies._internal.core.DataObject", "DataObject"
),
# We have overloads for up to five types, then fall back to Any.
# (why five? JSON atoms are None|bool|int|float|str and we do that a lot)
DifferingRevealedTypes(
"one_of(integers(), text(), none(), binary(), builds(list))",
"int | str | None | bytes | list[Never]",
"int | str | bytes | list[Unknown] | None",
),
DifferingRevealedTypes(
"dictionaries(integers(), datetimes())",
"dict[int, datetime.datetime]",
"dict[int, datetime]",
),
]
NUMPY_REVEALED_TYPES = [
(
'arrays(dtype=np.dtype("int32"), shape=1)',
"ndarray[tuple[int, ...], dtype[signedinteger[_32Bit]]]",
),
# (
# "arrays(dtype=np.dtype(int), shape=1)",
# "ndarray[tuple[int, ...], dtype[Union[signedinteger[Union[_32Bit, _64Bit]], bool[bool]]]]",
# # FIXME: `dtype[signedinteger[_32Bit | _64Bit] | bool[bool]]]]` on mypy now
# ),
(
"boolean_dtypes()",
"dtype[bool[bool]]", # np.bool[builtins.bool]
),
(
"unsigned_integer_dtypes(sizes=8)",
"dtype[unsignedinteger[_8Bit]]",
),
(
"unsigned_integer_dtypes(sizes=16)",
"dtype[unsignedinteger[_16Bit]]",
),
(
"unsigned_integer_dtypes(sizes=32)",
"dtype[unsignedinteger[_32Bit]]",
),
(
"unsigned_integer_dtypes(sizes=64)",
"dtype[unsignedinteger[_64Bit]]",
),
(
"unsigned_integer_dtypes()",
"dtype[unsignedinteger[Any]]",
),
(
"unsigned_integer_dtypes(sizes=(8, 16))",
"dtype[unsignedinteger[Any]]",
),
(
"integer_dtypes(sizes=8)",
"dtype[signedinteger[_8Bit]]",
),
(
"integer_dtypes(sizes=16)",
"dtype[signedinteger[_16Bit]]",
),
(
"integer_dtypes(sizes=32)",
"dtype[signedinteger[_32Bit]]",
),
(
"integer_dtypes(sizes=64)",
"dtype[signedinteger[_64Bit]]",
),
(
"integer_dtypes()",
"dtype[signedinteger[Any]]",
),
(
"integer_dtypes(sizes=(8, 16))",
"dtype[signedinteger[Any]]",
),
(
"floating_dtypes(sizes=16)",
"dtype[floating[_16Bit]]",
),
(
"floating_dtypes(sizes=32)",
"dtype[floating[_32Bit]]",
),
(
"floating_dtypes(sizes=64)",
"dtype[float64]",
),
(
"floating_dtypes(sizes=128)",
"dtype[floating[_128Bit]]",
),
(
"floating_dtypes()",
"dtype[floating[Any]]",
),
(
"floating_dtypes(sizes=(16, 32))",
"dtype[floating[Any]]",
),
(
"complex_number_dtypes(sizes=64)",
"dtype[complexfloating[_32Bit, _32Bit]]",
),
(
"complex_number_dtypes(sizes=128)",
"dtype[complex128]",
),
(
"complex_number_dtypes(sizes=256)",
"dtype[complexfloating[_128Bit, _128Bit]]",
),
(
"complex_number_dtypes()",
"dtype[complexfloating[Any, Any]]",
),
(
"complex_number_dtypes(sizes=(64, 128))",
"dtype[complexfloating[Any, Any]]",
),
(
"integer_array_indices(shape=(2, 3))",
"tuple[ndarray[tuple[int, ...], dtype[signedinteger[Any]]], ...]",
),
(
'integer_array_indices(shape=(2, 3), dtype=np.dtype("int32"))',
"tuple[ndarray[tuple[int, ...], dtype[signedinteger[_32Bit]]], ...]",
),
(
'integer_array_indices(shape=(2, 3), dtype=np.dtype("uint8"))',
"tuple[ndarray[tuple[int, ...], dtype[unsignedinteger[_8Bit]]], ...]",
),
]
| DifferingRevealedTypes |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 109126,
"end": 109993
} | class ____(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('amd_libs', 'libraries')
amd_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
| amd_info |
python | aio-libs__aiohttp | tests/test_http_exceptions.py | {
"start": 120,
"end": 1454
} | class ____:
def test_ctor(self) -> None:
err = http_exceptions.HttpProcessingError(
code=500, message="Internal error", headers=CIMultiDict()
)
assert err.code == 500
assert err.message == "Internal error"
assert err.headers == CIMultiDict()
def test_pickle(self) -> None:
err = http_exceptions.HttpProcessingError(
code=500, message="Internal error", headers=CIMultiDict()
)
err.foo = "bar" # type: ignore[attr-defined]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(err, proto)
err2 = pickle.loads(pickled)
assert err2.code == 500
assert err2.message == "Internal error"
assert err2.headers == CIMultiDict()
assert err2.foo == "bar"
def test_str(self) -> None:
err = http_exceptions.HttpProcessingError(
code=500, message="Internal error", headers=CIMultiDict()
)
assert str(err) == "500, message:\n Internal error"
def test_repr(self) -> None:
err = http_exceptions.HttpProcessingError(
code=500, message="Internal error", headers=CIMultiDict()
)
assert repr(err) == ("<HttpProcessingError: 500, message='Internal error'>")
| TestHttpProcessingError |
python | kubernetes-client__python | kubernetes/client/models/v1_cluster_role_binding.py | {
"start": 383,
"end": 7815
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'role_ref': 'V1RoleRef',
'subjects': 'list[RbacV1Subject]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'role_ref': 'roleRef',
'subjects': 'subjects'
}
def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None, local_vars_configuration=None): # noqa: E501
"""V1ClusterRoleBinding - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._role_ref = None
self._subjects = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.role_ref = role_ref
if subjects is not None:
self.subjects = subjects
@property
def api_version(self):
"""Gets the api_version of this V1ClusterRoleBinding. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ClusterRoleBinding. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ClusterRoleBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ClusterRoleBinding. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ClusterRoleBinding. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ClusterRoleBinding. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ClusterRoleBinding.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ClusterRoleBinding. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ClusterRoleBinding. # noqa: E501
:return: The metadata of this V1ClusterRoleBinding. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ClusterRoleBinding.
:param metadata: The metadata of this V1ClusterRoleBinding. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def role_ref(self):
"""Gets the role_ref of this V1ClusterRoleBinding. # noqa: E501
:return: The role_ref of this V1ClusterRoleBinding. # noqa: E501
:rtype: V1RoleRef
"""
return self._role_ref
@role_ref.setter
def role_ref(self, role_ref):
"""Sets the role_ref of this V1ClusterRoleBinding.
:param role_ref: The role_ref of this V1ClusterRoleBinding. # noqa: E501
:type: V1RoleRef
"""
if self.local_vars_configuration.client_side_validation and role_ref is None: # noqa: E501
raise ValueError("Invalid value for `role_ref`, must not be `None`") # noqa: E501
self._role_ref = role_ref
@property
def subjects(self):
"""Gets the subjects of this V1ClusterRoleBinding. # noqa: E501
Subjects holds references to the objects the role applies to. # noqa: E501
:return: The subjects of this V1ClusterRoleBinding. # noqa: E501
:rtype: list[RbacV1Subject]
"""
return self._subjects
@subjects.setter
def subjects(self, subjects):
"""Sets the subjects of this V1ClusterRoleBinding.
Subjects holds references to the objects the role applies to. # noqa: E501
:param subjects: The subjects of this V1ClusterRoleBinding. # noqa: E501
:type: list[RbacV1Subject]
"""
self._subjects = subjects
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ClusterRoleBinding):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ClusterRoleBinding):
return True
return self.to_dict() != other.to_dict()
| V1ClusterRoleBinding |
python | doocs__leetcode | solution/3500-3599/3561.Resulting String After Adjacent Removals/Solution.py | {
"start": 0,
"end": 263
} | class ____:
def resultingString(self, s: str) -> str:
stk = []
for c in s:
if stk and abs(ord(c) - ord(stk[-1])) in (1, 25):
stk.pop()
else:
stk.append(c)
return "".join(stk)
| Solution |
python | imageio__imageio | imageio/plugins/_tifffile.py | {
"start": 71110,
"end": 115380
} | class ____(object):
"""Read image and metadata from TIFF file.
TiffFile instances must be closed using the 'close' method, which is
automatically called when using the 'with' context manager.
Attributes
----------
pages : TiffPages
Sequence of TIFF pages in file.
series : list of TiffPageSeries
Sequences of closely related TIFF pages. These are computed
from OME, LSM, ImageJ, etc. metadata or based on similarity
of page properties such as shape, dtype, and compression.
byteorder : '>', '<'
The endianness of data in the file.
'>': big-endian (Motorola).
'>': little-endian (Intel).
is_flag : bool
If True, file is of a certain format.
Flags are: bigtiff, movie, shaped, ome, imagej, stk, lsm, fluoview,
nih, vista, 'micromanager, metaseries, mdgel, mediacy, tvips, fei,
sem, scn, svs, scanimage, andor, epics, pilatus, qptiff.
All attributes are read-only.
Examples
--------
>>> # read image array from TIFF file
>>> imsave('temp.tif', numpy.random.rand(5, 301, 219))
>>> with TiffFile('temp.tif') as tif:
... data = tif.asarray()
>>> data.shape
(5, 301, 219)
"""
def __init__(
self,
arg,
name=None,
offset=None,
size=None,
multifile=True,
movie=None,
**kwargs,
):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default, this is
the current file position.
size : int
Optional size of embedded file. By default, this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
movie : bool
If True, assume that later pages differ from first page only by
data offsets and byte counts. Significantly increases speed and
reduces memory usage when reading movies with thousands of pages.
Enabling this for non-movie files will result in data corruption
or crashes. Python 3 only.
kwargs : bool
'is_ome': If False, disable processing of OME-XML metadata.
"""
if "fastij" in kwargs:
del kwargs["fastij"]
raise DeprecationWarning("the fastij option will be removed")
for key, value in kwargs.items():
if key[:3] == "is_" and key[3:] in TIFF.FILE_FLAGS:
if value is not None and not value:
setattr(self, key, bool(value))
else:
raise TypeError("unexpected keyword argument: %s" % key)
fh = FileHandle(arg, mode="rb", name=name, offset=offset, size=size)
self._fh = fh
self._multifile = bool(multifile)
self._files = {fh.name: self} # cache of TiffFiles
try:
fh.seek(0)
try:
byteorder = {b"II": "<", b"MM": ">"}[fh.read(2)]
except KeyError:
raise ValueError("not a TIFF file")
sys_byteorder = {"big": ">", "little": "<"}[sys.byteorder]
self.isnative = byteorder == sys_byteorder
version = struct.unpack(byteorder + "H", fh.read(2))[0]
if version == 43:
# BigTiff
self.is_bigtiff = True
offsetsize, zero = struct.unpack(byteorder + "HH", fh.read(4))
if zero or offsetsize != 8:
raise ValueError("invalid BigTIFF file")
self.byteorder = byteorder
self.offsetsize = 8
self.offsetformat = byteorder + "Q"
self.tagnosize = 8
self.tagnoformat = byteorder + "Q"
self.tagsize = 20
self.tagformat1 = byteorder + "HH"
self.tagformat2 = byteorder + "Q8s"
elif version == 42:
self.is_bigtiff = False
self.byteorder = byteorder
self.offsetsize = 4
self.offsetformat = byteorder + "I"
self.tagnosize = 2
self.tagnoformat = byteorder + "H"
self.tagsize = 12
self.tagformat1 = byteorder + "HH"
self.tagformat2 = byteorder + "I4s"
else:
raise ValueError("invalid TIFF file")
# file handle is at offset to offset to first page
self.pages = TiffPages(self)
if self.is_lsm and (
self.filehandle.size >= 2**32
or self.pages[0].compression != 1
or self.pages[1].compression != 1
):
self._lsm_load_pages()
self._lsm_fix_strip_offsets()
self._lsm_fix_strip_bytecounts()
elif movie:
self.pages.useframes = True
except Exception:
fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
@lazyattr
def fstat(self):
"""Return status of file handle as stat_result object."""
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif.filehandle.close()
self._files = {}
def asarray(self, key=None, series=None, out=None, validate=True, maxworkers=1):
"""Return image data from multiple TIFF pages as numpy array.
By default, the data from the first series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int or TiffPageSeries
Defines which series of pages to return as array.
out : numpy.ndarray, str, or file-like object; optional
Buffer where image data will be saved.
If None (default), a new array will be created.
If numpy.ndarray, a writable array of compatible dtype and shape.
If 'memmap', directly memory-map the image data in the TIFF file
if possible; else create a memory-mapped array in a temporary file.
If str or open file, the file name or file object used to
create a memory-map to an array stored in a binary file on disk.
validate : bool
If True (default), validate various tags.
Passed to TiffPage.asarray().
maxworkers : int
Maximum number of threads to concurrently get data from pages.
Default is 1. If None, up to half the CPU cores are used.
Reading data from file is limited to a single thread.
Using multiple threads can significantly speed up this function
if the bottleneck is decoding compressed data, e.g. in case of
large LZW compressed LSM files.
If the bottleneck is I/O or pure Python code, using multiple
threads might be detrimental.
"""
if not self.pages:
return numpy.array([])
if key is None and series is None:
series = 0
if series is not None:
try:
series = self.series[series]
except (KeyError, TypeError):
pass
pages = series._pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, inttypes):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not pages:
raise ValueError("no pages selected")
if self.is_nih:
result = stack_pages(pages, out=out, maxworkers=maxworkers, squeeze=False)
elif key is None and series and series.offset:
typecode = self.byteorder + series.dtype.char
if out == "memmap" and pages[0].is_memmappable:
result = self.filehandle.memmap_array(
typecode, series.shape, series.offset
)
else:
if out is not None:
out = create_output(out, series.shape, series.dtype)
self.filehandle.seek(series.offset)
result = self.filehandle.read_array(
typecode, product(series.shape), out=out, native=True
)
elif len(pages) == 1:
result = pages[0].asarray(out=out, validate=validate)
else:
result = stack_pages(pages, out=out, maxworkers=maxworkers)
if result is None:
return
if key is None:
try:
result.shape = series.shape
except ValueError:
try:
warnings.warn(
"failed to reshape %s to %s" % (result.shape, series.shape)
)
# try series of expected shapes
result.shape = (-1,) + series.shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
elif len(pages) == 1:
result.shape = pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
@lazyattr
def series(self):
"""Return related pages as TiffPageSeries.
Side effect: after calling this function, TiffFile.pages might contain
TiffPage and TiffFrame instances.
"""
if not self.pages:
return []
useframes = self.pages.useframes
keyframe = self.pages.keyframe
series = []
for name in "ome imagej lsm fluoview nih mdgel shaped".split():
if getattr(self, "is_" + name, False):
series = getattr(self, "_%s_series" % name)()
break
self.pages.useframes = useframes
self.pages.keyframe = keyframe
if not series:
series = self._generic_series()
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
for i, s in enumerate(series):
s.index = i
return series
def _generic_series(self):
"""Return image series in file."""
if self.pages.useframes:
# movie mode
page = self.pages[0]
shape = page.shape
axes = page.axes
if len(self.pages) > 1:
shape = (len(self.pages),) + shape
axes = "I" + axes
return [
TiffPageSeries(self.pages[:], shape, page.dtype, axes, stype="movie")
]
self.pages.clear(False)
self.pages.load()
result = []
keys = []
series = {}
compressions = TIFF.DECOMPESSORS
for page in self.pages:
if not page.shape:
continue
key = page.shape + (page.axes, page.compression in compressions)
if key in series:
series[key].append(page)
else:
keys.append(key)
series[key] = [page]
for key in keys:
pages = series[key]
page = pages[0]
shape = page.shape
axes = page.axes
if len(pages) > 1:
shape = (len(pages),) + shape
axes = "I" + axes
result.append(
TiffPageSeries(pages, shape, page.dtype, axes, stype="Generic")
)
return result
def _shaped_series(self):
"""Return image series in "shaped" file."""
pages = self.pages
pages.useframes = True
lenpages = len(pages)
def append_series(series, pages, axes, shape, reshape, name, truncated):
page = pages[0]
if not axes:
shape = page.shape
axes = page.axes
if len(pages) > 1:
shape = (len(pages),) + shape
axes = "Q" + axes
size = product(shape)
resize = product(reshape)
if page.is_contiguous and resize > size and resize % size == 0:
if truncated is None:
truncated = True
axes = "Q" + axes
shape = (resize // size,) + shape
try:
axes = reshape_axes(axes, shape, reshape)
shape = reshape
except ValueError as e:
warnings.warn(str(e))
series.append(
TiffPageSeries(
pages,
shape,
page.dtype,
axes,
name=name,
stype="Shaped",
truncated=truncated,
)
)
keyframe = axes = shape = reshape = name = None
series = []
index = 0
while True:
if index >= lenpages:
break
# new keyframe; start of new series
pages.keyframe = index
keyframe = pages[index]
if not keyframe.is_shaped:
warnings.warn("invalid shape metadata or corrupted file")
return
# read metadata
axes = None
shape = None
metadata = json_description_metadata(keyframe.is_shaped)
name = metadata.get("name", "")
reshape = metadata["shape"]
truncated = metadata.get("truncated", None)
if "axes" in metadata:
axes = metadata["axes"]
if len(axes) == len(reshape):
shape = reshape
else:
axes = ""
warnings.warn("axes do not match shape")
# skip pages if possible
spages = [keyframe]
size = product(reshape)
npages, mod = divmod(size, product(keyframe.shape))
if mod:
warnings.warn("series shape does not match page shape")
return
if 1 < npages <= lenpages - index:
size *= keyframe._dtype.itemsize
if truncated:
npages = 1
elif (
keyframe.is_final
and keyframe.offset + size < pages[index + 1].offset
):
truncated = False
else:
# need to read all pages for series
truncated = False
for j in range(index + 1, index + npages):
page = pages[j]
page.keyframe = keyframe
spages.append(page)
append_series(series, spages, axes, shape, reshape, name, truncated)
index += npages
return series
def _imagej_series(self):
"""Return image series in ImageJ file."""
# ImageJ's dimension order is always TZCYXS
# TODO: fix loading of color, composite, or palette images
self.pages.useframes = True
self.pages.keyframe = 0
ij = self.imagej_metadata
pages = self.pages
page = pages[0]
def is_hyperstack():
# ImageJ hyperstack store all image metadata in the first page and
# image data are stored contiguously before the second page, if any
if not page.is_final:
return False
images = ij.get("images", 0)
if images <= 1:
return False
offset, count = page.is_contiguous
if (
count != product(page.shape) * page.bitspersample // 8
or offset + count * images > self.filehandle.size
):
raise ValueError()
# check that next page is stored after data
if len(pages) > 1 and offset + count * images > pages[1].offset:
return False
return True
try:
hyperstack = is_hyperstack()
except ValueError:
warnings.warn("invalid ImageJ metadata or corrupted file")
return
if hyperstack:
# no need to read other pages
pages = [page]
else:
self.pages.load()
shape = []
axes = []
if "frames" in ij:
shape.append(ij["frames"])
axes.append("T")
if "slices" in ij:
shape.append(ij["slices"])
axes.append("Z")
if "channels" in ij and not (
page.photometric == 2 and not ij.get("hyperstack", False)
):
shape.append(ij["channels"])
axes.append("C")
remain = ij.get("images", len(pages)) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append("I")
if page.axes[0] == "I":
# contiguous multiple images
shape.extend(page.shape[1:])
axes.extend(page.axes[1:])
elif page.axes[:2] == "SI":
# color-mapped contiguous multiple images
shape = page.shape[0:1] + tuple(shape) + page.shape[2:]
axes = list(page.axes[0]) + axes + list(page.axes[2:])
else:
shape.extend(page.shape)
axes.extend(page.axes)
truncated = (
hyperstack
and len(self.pages) == 1
and page.is_contiguous[1] != product(shape) * page.bitspersample // 8
)
return [
TiffPageSeries(
pages, shape, page.dtype, axes, stype="ImageJ", truncated=truncated
)
]
def _fluoview_series(self):
"""Return image series in FluoView file."""
self.pages.useframes = True
self.pages.keyframe = 0
self.pages.load()
mm = self.fluoview_metadata
mmhd = list(reversed(mm["Dimensions"]))
axes = "".join(
TIFF.MM_DIMENSIONS.get(i[0].upper(), "Q") for i in mmhd if i[1] > 1
)
shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
return [
TiffPageSeries(
self.pages,
shape,
self.pages[0].dtype,
axes,
name=mm["ImageName"],
stype="FluoView",
)
]
def _mdgel_series(self):
"""Return image series in MD Gel file."""
# only a single page, scaled according to metadata in second page
self.pages.useframes = False
self.pages.keyframe = 0
self.pages.load()
md = self.mdgel_metadata
if md["FileTag"] in (2, 128):
dtype = numpy.dtype("float32")
scale = md["ScalePixel"]
scale = scale[0] / scale[1] # rational
if md["FileTag"] == 2:
# squary root data format
def transform(a):
return a.astype("float32") ** 2 * scale
else:
def transform(a):
return a.astype("float32") * scale
else:
transform = None
page = self.pages[0]
return [
TiffPageSeries(
[page], page.shape, dtype, page.axes, transform=transform, stype="MDGel"
)
]
def _nih_series(self):
"""Return image series in NIH file."""
self.pages.useframes = True
self.pages.keyframe = 0
self.pages.load()
page0 = self.pages[0]
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = "I" + page0.axes
return [TiffPageSeries(self.pages, shape, page0.dtype, axes, stype="NIH")]
def _ome_series(self):
"""Return image series in OME-TIFF file(s)."""
from xml.etree import cElementTree as etree # delayed import
omexml = self.pages[0].description
try:
root = etree.fromstring(omexml)
except etree.ParseError as e:
# TODO: test badly encoded OME-XML
warnings.warn("ome-xml: %s" % e)
try:
# might work on Python 2
omexml = omexml.decode("utf-8", "ignore").encode("utf-8")
root = etree.fromstring(omexml)
except Exception:
return
self.pages.useframes = True
self.pages.keyframe = 0
self.pages.load()
uuid = root.attrib.get("UUID", None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
series = []
for element in root:
if element.tag.endswith("BinaryOnly"):
# TODO: load OME-XML from master or companion file
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith("StructuredAnnotations"):
for annot in element:
if not annot.attrib.get("Namespace", "").endswith("modulo"):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith("Along"):
continue
axis = along.tag[-1]
newaxis = along.attrib.get("Type", "other")
newaxis = TIFF.AXES_LABELS[newaxis]
if "Start" in along.attrib:
step = float(along.attrib.get("Step", 1))
start = float(along.attrib["Start"])
stop = float(along.attrib["End"]) + step
labels = numpy.arange(start, stop, step)
else:
labels = [
label.text
for label in along
if label.tag.endswith("Label")
]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith("Image"):
continue
attr = element.attrib
name = attr.get("Name", None)
for pixels in element:
if not pixels.tag.endswith("Pixels"):
continue
attr = pixels.attrib
dtype = attr.get("PixelType", None)
axes = "".join(reversed(attr["DimensionOrder"]))
shape = list(int(attr["Size" + ax]) for ax in axes)
size = product(shape[:-2])
ifds = None
spp = 1 # samples per pixel
# FIXME: this implementation assumes the last two
# dimensions are stored in tiff pages (shape[:-2]).
# Apparently that is not always the case.
for data in pixels:
if data.tag.endswith("Channel"):
attr = data.attrib
if ifds is None:
spp = int(attr.get("SamplesPerPixel", spp))
ifds = [None] * (size // spp)
elif int(attr.get("SamplesPerPixel", 1)) != spp:
raise ValueError("cannot handle differing SamplesPerPixel")
continue
if ifds is None:
ifds = [None] * (size // spp)
if not data.tag.endswith("TiffData"):
continue
attr = data.attrib
ifd = int(attr.get("IFD", 0))
num = int(attr.get("NumPlanes", 1 if "IFD" in attr else 0))
num = int(attr.get("PlaneCount", num))
idx = [int(attr.get("First" + ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith("UUID"):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib["FileName"]
try:
tif = TiffFile(os.path.join(dirname, fname))
tif.pages.useframes = True
tif.pages.keyframe = 0
tif.pages.load()
except (IOError, FileNotFoundError, ValueError):
warnings.warn("ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first UUID
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
# set a keyframe on all IFDs
keyframe = None
for i in ifds:
# try find a TiffPage
if i and i == i.keyframe:
keyframe = i
break
if not keyframe:
# reload a TiffPage from file
for i, keyframe in enumerate(ifds):
if keyframe:
keyframe.parent.pages.keyframe = keyframe.index
keyframe = keyframe.parent.pages[keyframe.index]
ifds[i] = keyframe
break
for i in ifds:
if i is not None:
i.keyframe = keyframe
dtype = keyframe.dtype
series.append(
TiffPageSeries(
ifds, shape, dtype, axes, parent=self, name=name, stype="OME"
)
)
for serie in series:
shape = list(serie.shape)
for axis, (newaxis, labels) in modulo.items():
i = serie.axes.index(axis)
size = len(labels)
if shape[i] == size:
serie.axes = serie.axes.replace(axis, newaxis, 1)
else:
shape[i] //= size
shape.insert(i + 1, size)
serie.axes = serie.axes.replace(axis, axis + newaxis, 1)
serie.shape = tuple(shape)
# squeeze dimensions
for serie in series:
serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes)
return series
def _lsm_series(self):
"""Return main image series in LSM file. Skip thumbnails."""
lsmi = self.lsm_metadata
axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi["ScanType"]]
if self.pages[0].photometric == 2: # RGB; more than one channel
axes = axes.replace("C", "").replace("XY", "XYC")
if lsmi.get("DimensionP", 0) > 1:
axes += "P"
if lsmi.get("DimensionM", 0) > 1:
axes += "M"
axes = axes[::-1]
shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes)
name = lsmi.get("Name", "")
self.pages.keyframe = 0
pages = self.pages[::2]
dtype = pages[0].dtype
series = [TiffPageSeries(pages, shape, dtype, axes, name=name, stype="LSM")]
if self.pages[1].is_reduced:
self.pages.keyframe = 1
pages = self.pages[1::2]
dtype = pages[0].dtype
cp, i = 1, 0
while cp < len(pages) and i < len(shape) - 2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + "CYX"
series.append(
TiffPageSeries(pages, shape, dtype, axes, name=name, stype="LSMreduced")
)
return series
def _lsm_load_pages(self):
"""Load all pages from LSM file."""
self.pages.cache = True
self.pages.useframes = True
# second series: thumbnails
self.pages.keyframe = 1
keyframe = self.pages[1]
for page in self.pages[1::2]:
page.keyframe = keyframe
# first series: data
self.pages.keyframe = 0
keyframe = self.pages[0]
for page in self.pages[::2]:
page.keyframe = keyframe
def _lsm_fix_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB.
Each series and position require separate unwrapping (undocumented).
"""
if self.filehandle.size < 2**32:
return
pages = self.pages
npages = len(pages)
series = self.series[0]
axes = series.axes
# find positions
positions = 1
for i in 0, 1:
if series.axes[i] in "PM":
positions *= series.shape[i]
# make time axis first
if positions > 1:
ntimes = 0
for i in 1, 2:
if axes[i] == "T":
ntimes = series.shape[i]
break
if ntimes:
div, mod = divmod(npages, 2 * positions * ntimes)
assert mod == 0
shape = (positions, ntimes, div, 2)
indices = numpy.arange(product(shape)).reshape(shape)
indices = numpy.moveaxis(indices, 1, 0)
else:
indices = numpy.arange(npages).reshape(-1, 2)
# images of reduced page might be stored first
if pages[0].dataoffsets[0] > pages[1].dataoffsets[0]:
indices = indices[..., ::-1]
# unwrap offsets
wrap = 0
previousoffset = 0
for i in indices.flat:
page = pages[i]
dataoffsets = []
for currentoffset in page.dataoffsets:
if currentoffset < previousoffset:
wrap += 2**32
dataoffsets.append(currentoffset + wrap)
previousoffset = currentoffset
page.dataoffsets = tuple(dataoffsets)
def _lsm_fix_strip_bytecounts(self):
"""Set databytecounts to size of compressed data.
The StripByteCounts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
pages = self.pages
if pages[0].compression == 1:
return
# sort pages by first strip offset
pages = sorted(pages, key=lambda p: p.dataoffsets[0])
npages = len(pages) - 1
for i, page in enumerate(pages):
if page.index % 2:
continue
offsets = page.dataoffsets
bytecounts = page.databytecounts
if i < npages:
lastoffset = pages[i + 1].dataoffsets[0]
else:
# LZW compressed strips might be longer than uncompressed
lastoffset = min(offsets[-1] + 2 * bytecounts[-1], self._fh.size)
offsets = offsets + (lastoffset,)
page.databytecounts = tuple(
offsets[j + 1] - offsets[j] for j in range(len(bytecounts))
)
def __getattr__(self, name):
"""Return 'is_flag' attributes from first page."""
if name[3:] in TIFF.FILE_FLAGS:
if not self.pages:
return False
value = bool(getattr(self.pages[0], name))
setattr(self, name, value)
return value
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name)
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __str__(self, detail=0, width=79):
"""Return string containing information about file.
The detail parameter specifies the level of detail returned:
0: file only.
1: all series, first page of series and its tags.
2: large tag values and file metadata.
3: all pages.
"""
info = [
"TiffFile '%s'",
format_size(self._fh.size),
{"<": "LittleEndian", ">": "BigEndian"}[self.byteorder],
]
if self.is_bigtiff:
info.append("BigTiff")
info.append("|".join(f.upper() for f in self.flags))
if len(self.pages) > 1:
info.append("%i Pages" % len(self.pages))
if len(self.series) > 1:
info.append("%i Series" % len(self.series))
if len(self._files) > 1:
info.append("%i Files" % (len(self._files)))
info = " ".join(info)
info = info.replace(" ", " ").replace(" ", " ")
info = info % snipstr(self._fh.name, max(12, width + 2 - len(info)))
if detail <= 0:
return info
info = [info]
info.append("\n".join(str(s) for s in self.series))
if detail >= 3:
info.extend(
(
TiffPage.__str__(p, detail=detail, width=width)
for p in self.pages
if p is not None
)
)
else:
info.extend(
(
TiffPage.__str__(s.pages[0], detail=detail, width=width)
for s in self.series
if s.pages[0] is not None
)
)
if detail >= 2:
for name in sorted(self.flags):
if hasattr(self, name + "_metadata"):
m = getattr(self, name + "_metadata")
if m:
info.append(
"%s_METADATA\n%s"
% (
name.upper(),
pformat(m, width=width, height=detail * 12),
)
)
return "\n\n".join(info).replace("\n\n\n", "\n\n")
@lazyattr
def flags(self):
"""Return set of file flags."""
return set(
name.lower()
for name in sorted(TIFF.FILE_FLAGS)
if getattr(self, "is_" + name)
)
@lazyattr
def is_mdgel(self):
"""File has MD Gel format."""
try:
return self.pages[0].is_mdgel or self.pages[1].is_mdgel
except IndexError:
return False
@property
def is_movie(self):
"""Return if file is a movie."""
return self.pages.useframes
@lazyattr
def shaped_metadata(self):
"""Return Tifffile metadata from JSON descriptions as dicts."""
if not self.is_shaped:
return
return tuple(
json_description_metadata(s.pages[0].is_shaped)
for s in self.series
if s.stype.lower() == "shaped"
)
@lazyattr
def ome_metadata(self):
"""Return OME XML as dict."""
# TODO: remove this or return XML?
if not self.is_ome:
return
return xml2dict(self.pages[0].description)["OME"]
@lazyattr
def qptiff_metadata(self):
"""Return PerkinElmer-QPI-ImageDescription XML element as dict."""
if not self.is_qptiff:
return
root = "PerkinElmer-QPI-ImageDescription"
xml = self.pages[0].description.replace(" " + root + " ", root)
return xml2dict(xml)[root]
@lazyattr
def lsm_metadata(self):
"""Return LSM metadata from CZ_LSMINFO tag as dict."""
if not self.is_lsm:
return
return self.pages[0].tags["CZ_LSMINFO"].value
@lazyattr
def stk_metadata(self):
"""Return STK metadata from UIC tags as dict."""
if not self.is_stk:
return
page = self.pages[0]
tags = page.tags
result = {}
result["NumberPlanes"] = tags["UIC2tag"].count
if page.description:
result["PlaneDescriptions"] = page.description.split("\0")
# result['plane_descriptions'] = stk_description_metadata(
# page.image_description)
if "UIC1tag" in tags:
result.update(tags["UIC1tag"].value)
if "UIC3tag" in tags:
result.update(tags["UIC3tag"].value) # wavelengths
if "UIC4tag" in tags:
result.update(tags["UIC4tag"].value) # override uic1 tags
uic2tag = tags["UIC2tag"].value
result["ZDistance"] = uic2tag["ZDistance"]
result["TimeCreated"] = uic2tag["TimeCreated"]
result["TimeModified"] = uic2tag["TimeModified"]
try:
result["DatetimeCreated"] = numpy.array(
[
julian_datetime(*dt)
for dt in zip(uic2tag["DateCreated"], uic2tag["TimeCreated"])
],
dtype="datetime64[ns]",
)
result["DatetimeModified"] = numpy.array(
[
julian_datetime(*dt)
for dt in zip(uic2tag["DateModified"], uic2tag["TimeModified"])
],
dtype="datetime64[ns]",
)
except ValueError as e:
warnings.warn("stk_metadata: %s" % e)
return result
@lazyattr
def imagej_metadata(self):
"""Return consolidated ImageJ metadata as dict."""
if not self.is_imagej:
return
page = self.pages[0]
result = imagej_description_metadata(page.is_imagej)
if "IJMetadata" in page.tags:
try:
result.update(page.tags["IJMetadata"].value)
except Exception:
pass
return result
@lazyattr
def fluoview_metadata(self):
"""Return consolidated FluoView metadata as dict."""
if not self.is_fluoview:
return
result = {}
page = self.pages[0]
result.update(page.tags["MM_Header"].value)
# TODO: read stamps from all pages
result["Stamp"] = page.tags["MM_Stamp"].value
# skip parsing image description; not reliable
# try:
# t = fluoview_description_metadata(page.image_description)
# if t is not None:
# result['ImageDescription'] = t
# except Exception as e:
# warnings.warn(
# "failed to read FluoView image description: %s" % e)
return result
@lazyattr
def nih_metadata(self):
"""Return NIH Image metadata from NIHImageHeader tag as dict."""
if not self.is_nih:
return
return self.pages[0].tags["NIHImageHeader"].value
@lazyattr
def fei_metadata(self):
"""Return FEI metadata from SFEG or HELIOS tags as dict."""
if not self.is_fei:
return
tags = self.pages[0].tags
if "FEI_SFEG" in tags:
return tags["FEI_SFEG"].value
if "FEI_HELIOS" in tags:
return tags["FEI_HELIOS"].value
@lazyattr
def sem_metadata(self):
"""Return SEM metadata from CZ_SEM tag as dict."""
if not self.is_sem:
return
return self.pages[0].tags["CZ_SEM"].value
@lazyattr
def mdgel_metadata(self):
"""Return consolidated metadata from MD GEL tags as dict."""
for page in self.pages[:2]:
if "MDFileTag" in page.tags:
tags = page.tags
break
else:
return
result = {}
for code in range(33445, 33453):
name = TIFF.TAGS[code]
if name not in tags:
continue
result[name[2:]] = tags[name].value
return result
@lazyattr
def andor_metadata(self):
"""Return Andor tags as dict."""
return self.pages[0].andor_tags
@lazyattr
def epics_metadata(self):
"""Return EPICS areaDetector tags as dict."""
return self.pages[0].epics_tags
@lazyattr
def tvips_metadata(self):
"""Return TVIPS tag as dict."""
if not self.is_tvips:
return
return self.pages[0].tags["TVIPS"].value
@lazyattr
def metaseries_metadata(self):
"""Return MetaSeries metadata from image description as dict."""
if not self.is_metaseries:
return
return metaseries_description_metadata(self.pages[0].description)
@lazyattr
def pilatus_metadata(self):
"""Return Pilatus metadata from image description as dict."""
if not self.is_pilatus:
return
return pilatus_description_metadata(self.pages[0].description)
@lazyattr
def micromanager_metadata(self):
"""Return consolidated MicroManager metadata as dict."""
if not self.is_micromanager:
return
# from file header
result = read_micromanager_metadata(self._fh)
# from tag
result.update(self.pages[0].tags["MicroManagerMetadata"].value)
return result
@lazyattr
def scanimage_metadata(self):
"""Return ScanImage non-varying frame and ROI metadata as dict."""
if not self.is_scanimage:
return
result = {}
try:
framedata, roidata = read_scanimage_metadata(self._fh)
result["FrameData"] = framedata
result.update(roidata)
except ValueError:
pass
# TODO: scanimage_artist_metadata
try:
result["Description"] = scanimage_description_metadata(
self.pages[0].description
)
except Exception as e:
warnings.warn("scanimage_description_metadata failed: %s" % e)
return result
@property
def geotiff_metadata(self):
"""Return GeoTIFF metadata from first page as dict."""
if not self.is_geotiff:
return
return self.pages[0].geotiff_tags
| TiffFile |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/yaml_template/test_yaml_template_generator.py | {
"start": 533,
"end": 8415
} | class ____:
@property
def test_data_dir(self) -> Path:
"""Get the test data directory path."""
return Path(__file__).parent / "test_data"
def load_expected_schema(self, test_case_name: str) -> str:
"""Load expected schema template from disk for a given test case."""
expected_file = self.test_data_dir / test_case_name / "expected_schema"
return expected_file.read_text().strip()
def load_expected_example(self, test_case_name: str) -> str:
"""Load expected example values from disk for a given test case."""
expected_file = self.test_data_dir / test_case_name / "expected_example"
return expected_file.read_text().strip()
def get_component_schema(self, component_class) -> dict:
"""Generate JSON schema from a component class."""
model_cls = component_class.get_model_cls()
schema = model_cls.model_json_schema()
return schema
def test_simple_pipes_script_component_template(self):
"""Test generation of SimplePipesScriptComponent template using actual component class."""
from dagster_test.components import SimplePipesScriptComponent
# Get schema from the actual component class
schema = self.get_component_schema(SimplePipesScriptComponent)
# Test schema generation
schema_result = generate_defs_yaml_schema(
"dagster_test.components.simple_pipes_script_asset.SimplePipesScriptComponent", schema
)
expected_schema = self.load_expected_schema("simple_example")
assert schema_result.strip() == expected_schema, (
f"Generated schema does not match expected output.\n\nGenerated:\n{schema_result}\n\nExpected:\n{expected_schema}"
)
# Test example values generation
example_result = generate_defs_yaml_example_values(
"dagster_test.components.simple_pipes_script_asset.SimplePipesScriptComponent", schema
)
expected_example = self.load_expected_example("simple_example")
assert example_result.strip() == expected_example, (
f"Generated example values do not match expected output.\n\nGenerated:\n{example_result}\n\nExpected:\n{expected_example}"
)
def test_defs_folder_component_template(self):
"""Test generation of DefsFolderComponent template using actual component class."""
# Get schema from the actual component class
schema = self.get_component_schema(dg.DefsFolderComponent)
# Test schema generation
schema_result = generate_defs_yaml_schema("dagster.DefsFolderComponent", schema)
expected_schema = self.load_expected_schema("defs_folder")
assert schema_result.strip() == expected_schema, (
f"Generated schema does not match expected output.\n\nGenerated:\n{schema_result}\n\nExpected:\n{expected_schema}"
)
# Test example values generation
example_result = generate_defs_yaml_example_values("dagster.DefsFolderComponent", schema)
expected_example = self.load_expected_example("defs_folder")
assert example_result.strip() == expected_example, (
f"Generated example values do not match expected output.\n\nGenerated:\n{example_result}\n\nExpected:\n{expected_example}"
)
def test_fivetran_account_component_template(self):
"""Test generation of FivetranAccountComponent template using actual component class."""
# Get schema from the actual component class
schema = self.get_component_schema(FivetranAccountComponent)
# Test schema generation
schema_result = generate_defs_yaml_schema(
"dagster_fivetran.FivetranAccountComponent", schema
)
expected_schema = self.load_expected_schema("account")
assert schema_result.strip() == expected_schema, (
f"Generated schema does not match expected output.\n\nGenerated:\n{schema_result}\n\nExpected:\n{expected_schema}"
)
# Test example values generation
example_result = generate_defs_yaml_example_values(
"dagster_fivetran.FivetranAccountComponent", schema
)
expected_example = self.load_expected_example("account")
assert example_result.strip() == expected_example, (
f"Generated example values do not match expected output.\n\nGenerated:\n{example_result}\n\nExpected:\n{expected_example}"
)
def test_python_script_component_template(self):
"""Test generation of PythonScriptComponent template using actual component class."""
# Get schema from the actual component class
schema = self.get_component_schema(dg.PythonScriptComponent)
# Test schema generation
schema_result = generate_defs_yaml_schema("dagster.PythonScriptComponent", schema)
expected_schema = self.load_expected_schema("python_script")
assert schema_result.strip() == expected_schema, (
f"Generated schema does not match expected output.\n\nGenerated:\n{schema_result}\n\nExpected:\n{expected_schema}"
)
# Test example values generation
example_result = generate_defs_yaml_example_values("dagster.PythonScriptComponent", schema)
expected_example = self.load_expected_example("python_script")
assert example_result.strip() == expected_example, (
f"Generated example values do not match expected output.\n\nGenerated:\n{example_result}\n\nExpected:\n{expected_example}"
)
def test_sling_replication_collection_component_template(self):
"""Test generation of SlingReplicationCollectionComponent template using actual component class."""
# Get schema from the actual component class
schema = self.get_component_schema(SlingReplicationCollectionComponent)
# Test schema generation
schema_result = generate_defs_yaml_schema(
"dagster_sling.SlingReplicationCollectionComponent", schema
)
expected_schema = self.load_expected_schema("sling_replication_collection")
assert schema_result.strip() == expected_schema, (
f"Generated schema does not match expected output.\n\nGenerated:\n{schema_result}\n\nExpected:\n{expected_schema}"
)
# Test example values generation
example_result = generate_defs_yaml_example_values(
"dagster_sling.SlingReplicationCollectionComponent", schema
)
expected_example = self.load_expected_example("sling_replication_collection")
assert example_result.strip() == expected_example, (
f"Generated example values do not match expected output.\n\nGenerated:\n{example_result}\n\nExpected:\n{expected_example}"
)
def test_dbt_project_component_template(self):
"""Test generation of DbtProjectComponent template using actual component class."""
# Get schema from the actual component class
schema = self.get_component_schema(DbtProjectComponent)
# Test schema generation
schema_result = generate_defs_yaml_schema("dagster_dbt.DbtProjectComponent", schema)
expected_schema = self.load_expected_schema("dbt_project")
assert schema_result.strip() == expected_schema, (
f"Generated schema does not match expected output.\n\nGenerated:\n{schema_result}\n\nExpected:\n{expected_schema}"
)
# Test example values generation
example_result = generate_defs_yaml_example_values(
"dagster_dbt.DbtProjectComponent", schema
)
expected_example = self.load_expected_example("dbt_project")
assert example_result.strip() == expected_example, (
f"Generated example values do not match expected output.\n\nGenerated:\n{example_result}\n\nExpected:\n{expected_example}"
)
| TestYamlTemplateGenerator |
python | huggingface__transformers | src/transformers/models/deepseek_v2/modular_deepseek_v2.py | {
"start": 1401,
"end": 11420
} | class ____(LlamaConfig):
r"""
This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate a DeepSeek
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of DeepSeek-V2-Lite" [deepseek-ai/DeepSeek-V2-Lite"](https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite").
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`DeepseekV2Model`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
The number of key-value heads used to implement Grouped Query Attention (GQA). If
`num_key_value_heads=num_attention_heads`, the model will use Multi-Head Attention (MHA). If
`num_key_value_heads=1`, the model will use Multi-Query Attention (MQA). Otherwise, GQA is used.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon value used by the RMS normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/value attentions (useful for inference optimization).
pad_token_id (`int`, *optional*):
Padding token ID.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning-of-sequence token ID.
eos_token_id (`int`, *optional*, defaults to 2):
End-of-sequence token ID.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value, and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability applied to attention weights.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias term in the MLP layers.
first_k_dense_replace (`int`, *optional*, defaults to 0):
Number of dense layers in the shallow layers before switching to MoE layers.
kv_lora_rank (`int`, *optional*, defaults to 512):
Rank of the LoRA decomposition for key-value projections.
q_lora_rank (`int`, *optional*, defaults to 1536):
Rank of the LoRA decomposition for query projections.
Specifically, it determines the dimensionality to which the query (q) vectors are compressed before being expanded back to their original size.
It reduces computational overhead while maintaining model performance.
n_group (`int`, *optional*):
Number of groups for routed experts.
n_routed_experts (`int`, *optional*, defaults to 64):
Number of routed experts (None indicates a dense model).
n_shared_experts (`int`, *optional*, defaults to 2):
Number of shared experts (None indicates a dense model).
qk_nope_head_dim (`int`, *optional*, defaults to 128):
The head dimension for the QK (query-key) projections when using NOPE (Neural Operator Position Encoding).
qk_rope_head_dim (`int`, *optional*, defaults to 64):
The head dimension for QK projections when using RoPE.
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
Scaling factor for routed experts in MoE models.
topk_group (`int`, *optional*):
Number of selected groups per token for expert selection.
topk_method (`str`, *optional*, defaults to `"greedy"`):
The method used for selecting top-k experts in the routed gate mechanism.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to renormalize the router probabilities when `top_k > 1`. This flag is kept for backward
compatibility with previously released checkpoints and runtimes relying on the legacy DeepSeek config.
v_head_dim (`int`, *optional*, defaults to 128):
The dimension of value projections in the attention layers.
num_experts_per_tok (`int`, *optional*):
The number of experts selected per token. If `None`, the model behaves as a dense Transformer.
moe_intermediate_size (`int`, *optional*, defaults to 1407):
Dimension of the MoE (Mixture of Experts) representations.
```python
>>> from transformers import DeepseekV2Model, DeepseekV2Config
>>> # Initializing a DeepSeek-V2 style configuration
>>> configuration = DeepseekV2Config()
>>> # Accessing the model configuration
>>> model = DeepseekV2Model(configuration)
>>> print(model.config)
```
"""
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.q_a_proj": "colwise",
"layers.*.self_attn.q_b_proj": "colwise",
"layers.*.self_attn.kv_b_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "local_colwise",
"layers.*.mlp.experts.down_proj": "local_rowwise",
"layers.*.mlp.experts": "gather",
}
model_type = "deepseek_v2"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
first_k_dense_replace: Optional[int] = 0,
kv_lora_rank: Optional[int] = 512,
q_lora_rank: Optional[int] = 1536,
n_group: Optional[int] = None,
n_routed_experts: Optional[int] = 64,
n_shared_experts: Optional[int] = 2,
qk_nope_head_dim: Optional[int] = 128,
qk_rope_head_dim: Optional[int] = 64,
routed_scaling_factor: Optional[float] = 1.0,
topk_group: Optional[int] = None,
topk_method: Optional[str] = "greedy",
norm_topk_prob: Optional[bool] = False,
v_head_dim: Optional[int] = 128,
num_experts_per_tok: Optional[int] = None,
moe_intermediate_size: Optional[int] = 1407,
**kwargs,
):
self.first_k_dense_replace = first_k_dense_replace
self.kv_lora_rank = kv_lora_rank
self.q_lora_rank = q_lora_rank
self.n_group = n_group
self.n_routed_experts = n_routed_experts
self.n_shared_experts = n_shared_experts
self.qk_nope_head_dim = qk_nope_head_dim
self.qk_rope_head_dim = qk_rope_head_dim
self.routed_scaling_factor = routed_scaling_factor
self.topk_group = topk_group
self.topk_method = topk_method
self.norm_topk_prob = norm_topk_prob
self.v_head_dim = v_head_dim
self.num_experts_per_tok = num_experts_per_tok
self.moe_intermediate_size = moe_intermediate_size
super().__init__(**kwargs)
self.head_dim = qk_rope_head_dim
del self.pretraining_tp
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
# Broadcast to [1, 1, seq_len, dim // 2]
freqs_cis = freqs_cis.unsqueeze(1).to(xq_.device)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3).type_as(xq)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3).type_as(xk)
return xq_out, xk_out
| DeepseekV2Config |
python | tensorflow__tensorflow | tensorflow/python/eager/remote_test.py | {
"start": 8388,
"end": 10848
} | class ____(test.TestCase):
def setUp(self):
super(RemoteAsyncTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def tearDown(self):
super(RemoteAsyncTest, self).tearDown()
# Reset the context to avoid polluting other test cases.
context._reset_context()
def test_out_of_range_with_while_loop(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
while True:
try:
with ops.device('/job:worker/task:0'):
train_step(iterator)
except (errors.OutOfRangeError, errors.InternalError):
context.async_clear_error()
break
self.assertAllEqual(v.numpy(), 4.0)
def test_out_of_range_with_for_loop(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
num_steps = 3
for i in range(num_steps):
try:
with ops.device('/job:worker/task:0'):
train_step(iterator)
if i == num_steps - 1:
context.async_wait()
except errors.OutOfRangeError:
context.async_clear_error()
break
self.assertAllEqual(v.numpy(), 4.0)
def test_out_of_range_with_async_scope(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
num_steps = 3
try:
with context.async_scope():
for _ in range(num_steps):
with ops.device('/job:worker/task:0'):
train_step(iterator)
except errors.OutOfRangeError:
context.async_clear_error()
self.assertAllEqual(v.numpy(), 4.0)
| RemoteAsyncTest |
python | PrefectHQ__prefect | tests/server/models/test_flows.py | {
"start": 2448,
"end": 3068
} | class ____:
async def test_read_flow(self, session):
# create a flow to read
flow = await models.flows.create_flow(
session=session, flow=schemas.core.Flow(name="my-flow")
)
assert flow.name == "my-flow"
read_flow = await models.flows.read_flow(session=session, flow_id=flow.id)
assert flow.id == read_flow.id
assert flow.name == read_flow.name
async def test_read_flow_returns_none_if_does_not_exist(self, session):
result = await models.flows.read_flow(session=session, flow_id=str(uuid4()))
assert result is None
| TestReadFlow |
python | cython__cython | tests/run/for_in_iter.py | {
"start": 2527,
"end": 3446
} | class ____(object):
def __init__(self):
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i > 5:
raise StopIteration
self.i += 1
self.__next__ = self.next2
return 1
def next2(self):
self.__next__ = self.next3
return 2
def next3(self):
del self.__next__
raise StopIteration
def for_in_next_replacing_iter():
"""
>>> for_in_pyiter(NextReplacingIterable())
[1, 1, 1, 1, 1, 1]
"""
def for_in_gen(N):
"""
>>> for_in_pyiter(for_in_gen(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for i in range(N):
yield i
def for_in_range_invalid_arg_count():
"""
>>> for_in_range_invalid_arg_count() # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...
"""
for i in range(1, 2, 3, 4):
pass
| NextReplacingIterable |
python | pypa__setuptools | setuptools/_scripts.py | {
"start": 3075,
"end": 3157
} | class ____(CommandSpec):
split_args = _SplitArgs(posix=False)
| WindowsCommandSpec |
python | google__jax | jax/_src/test_util.py | {
"start": 66300,
"end": 71870
} | class ____(np.vectorize):
"""Same as numpy.vectorize but using mpmath backend for function evaluation.
"""
map_float_to_complex = dict(float16='complex32', float32='complex64', float64='complex128', float128='complex256', longdouble='clongdouble')
map_complex_to_float = {v: k for k, v in map_float_to_complex.items()}
float_prec = dict(
# float16=11,
float32=24,
float64=53,
# float128=113,
# longdouble=113
)
float_minexp = dict(
float16=-14,
float32=-126,
float64=-1022,
float128=-16382
)
float_maxexp = dict(
float16=16,
float32=128,
float64=1024,
float128=16384,
)
def __init__(self, *args, **kwargs):
mpmath = kwargs.pop('mpmath', None)
if mpmath is None:
raise ValueError('vectorize_with_mpmath: no mpmath argument specified')
self.extra_prec_multiplier = kwargs.pop('extra_prec_multiplier', 0)
self.extra_prec = kwargs.pop('extra_prec', 0)
self.mpmath = mpmath
self.contexts = {}
self.contexts_inv = {}
for fp_format, prec in self.float_prec.items():
ctx = self.mpmath.mp.clone()
ctx.prec = prec
self.contexts[fp_format] = ctx
self.contexts_inv[ctx] = fp_format
super().__init__(*args, **kwargs)
def get_context(self, x):
if isinstance(x, (np.ndarray, np.floating, np.complexfloating)):
fp_format = str(x.dtype)
fp_format = self.map_complex_to_float.get(fp_format, fp_format)
return self.contexts[fp_format]
raise NotImplementedError(f'get mpmath context from {type(x).__name__} instance')
def nptomp(self, x):
"""Convert numpy array/scalar to an array/instance of mpmath number type.
"""
if isinstance(x, np.ndarray):
return np.fromiter(map(self.nptomp, x.flatten()), dtype=object).reshape(x.shape)
elif isinstance(x, np.floating):
mpmath = self.mpmath
ctx = self.get_context(x)
prec, rounding = ctx._prec_rounding
if np.isposinf(x):
return ctx.make_mpf(mpmath.libmp.finf)
elif np.isneginf(x):
return ctx.make_mpf(mpmath.libmp.fninf)
elif np.isnan(x):
return ctx.make_mpf(mpmath.libmp.fnan)
elif np.isfinite(x):
mantissa, exponent = np.frexp(x)
man = int(np.ldexp(mantissa, prec))
exp = int(exponent - prec)
r = ctx.make_mpf(mpmath.libmp.from_man_exp(man, exp, prec, rounding))
assert ctx.isfinite(r), r._mpf_
return r
elif isinstance(x, np.complexfloating):
re, im = self.nptomp(x.real), self.nptomp(x.imag)
return re.context.make_mpc((re._mpf_, im._mpf_))
raise NotImplementedError(f'convert {type(x).__name__} instance to mpmath number type')
def mptonp(self, x):
"""Convert mpmath instance to numpy array/scalar type.
"""
if isinstance(x, np.ndarray) and x.dtype.kind == 'O':
x_flat = x.flatten()
item = x_flat[0]
ctx = item.context
fp_format = self.contexts_inv[ctx]
if isinstance(item, ctx.mpc):
dtype = getattr(np, self.map_float_to_complex[fp_format])
elif isinstance(item, ctx.mpf):
dtype = getattr(np, fp_format)
else:
dtype = None
if dtype is not None:
return np.fromiter(map(self.mptonp, x_flat), dtype=dtype).reshape(x.shape)
elif isinstance(x, self.mpmath.ctx_mp.mpnumeric):
ctx = x.context
if isinstance(x, ctx.mpc):
fp_format = self.contexts_inv[ctx]
dtype = getattr(np, self.map_float_to_complex[fp_format])
r = dtype().reshape(1).view(getattr(np, fp_format))
r[0] = self.mptonp(x.real)
r[1] = self.mptonp(x.imag)
return r.view(dtype)[0]
elif isinstance(x, ctx.mpf):
fp_format = self.contexts_inv[ctx]
dtype = getattr(np, fp_format)
if ctx.isfinite(x):
sign, man, exp, bc = self.mpmath.libmp.normalize(*x._mpf_, *ctx._prec_rounding)
assert bc >= 0, (sign, man, exp, bc, x._mpf_)
if exp + bc < self.float_minexp[fp_format]:
return -ctx.zero if sign else ctx.zero
if exp + bc > self.float_maxexp[fp_format]:
return ctx.ninf if sign else ctx.inf
man = dtype(-man if sign else man)
r = np.ldexp(man, exp)
assert np.isfinite(r), (x, r, x._mpf_, man)
return r
elif ctx.isnan(x):
return dtype(np.nan)
elif ctx.isinf(x):
return dtype(-np.inf if x._mpf_[0] else np.inf)
raise NotImplementedError(f'convert {type(x)} instance to numpy floating point type')
def __call__(self, *args, **kwargs):
mp_args = []
context: Any = None
for a in args:
if isinstance(a, (np.ndarray, np.floating, np.complexfloating)):
mp_args.append(self.nptomp(a))
if context is None:
context = self.get_context(a)
else:
assert context is self.get_context(a)
else:
mp_args.append(a)
extra_prec = int(context.prec * self.extra_prec_multiplier) + self.extra_prec
with context.extraprec(extra_prec):
result = super().__call__(*mp_args, **kwargs)
if isinstance(result, tuple):
lst = []
for r in result:
if ((isinstance(r, np.ndarray) and r.dtype.kind == 'O')
or isinstance(r, self.mpmath.ctx_mp.mpnumeric)):
r = self.mptonp(r)
lst.append(r)
return tuple(lst)
if ((isinstance(result, np.ndarray) and result.dtype.kind == 'O')
or isinstance(result, self.mpmath.ctx_mp.mpnumeric)):
return self.mptonp(result)
return result
| vectorize_with_mpmath |
python | doocs__leetcode | solution/3100-3199/3154.Find Number of Ways to Reach the K-th Stair/Solution.py | {
"start": 0,
"end": 391
} | class ____:
def waysToReachStair(self, k: int) -> int:
@cache
def dfs(i: int, j: int, jump: int) -> int:
if i > k + 1:
return 0
ans = int(i == k)
if i > 0 and j == 0:
ans += dfs(i - 1, 1, jump)
ans += dfs(i + (1 << jump), 0, jump + 1)
return ans
return dfs(1, 0, 0)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/reorder-list.py | {
"start": 233,
"end": 987
} | class ____(object):
# @param head, a ListNode
# @return nothing
def reorderList(self, head):
if head == None or head.next == None:
return head
fast, slow, prev = head, head, None
while fast != None and fast.next != None:
fast, slow, prev = fast.next.next, slow.next, slow
current, prev.next, prev = slow, None, None
while current != None:
current.next, prev, current = prev, current, current.next
l1, l2 = head, prev
dummy = ListNode(0)
current = dummy
while l1 != None and l2 != None:
current.next, current, l1 = l1, l1, l1.next
current.next, current, l2 = l2, l2, l2.next
return dummy.next
| Solution |
python | numba__numba | numba/core/annotations/type_annotations.py | {
"start": 272,
"end": 984
} | class ____(Mapping):
def __init__(self, func):
try:
lines, startno = inspect.getsourcelines(func)
except OSError:
self.lines = ()
self.startno = 0
else:
self.lines = textwrap.dedent(''.join(lines)).splitlines()
self.startno = startno
def __getitem__(self, lineno):
try:
return self.lines[lineno - self.startno].rstrip()
except IndexError:
return ''
def __iter__(self):
return iter((self.startno + i) for i in range(len(self.lines)))
def __len__(self):
return len(self.lines)
@property
def avail(self):
return bool(self.lines)
| SourceLines |
python | astropy__astropy | astropy/visualization/wcsaxes/__init__.py | {
"start": 554,
"end": 1264
} | class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.visualization.wcsaxes`.
"""
coordinate_range_samples = _config.ConfigItem(
50,
"The number of samples along each image axis when determining "
"the range of coordinates in a plot.",
)
frame_boundary_samples = _config.ConfigItem(
1000,
"How many points to sample along the axes when determining tick locations.",
)
grid_samples = _config.ConfigItem(
1000, "How many points to sample along grid lines."
)
contour_grid_samples = _config.ConfigItem(
200, "The grid size to use when drawing a grid using contours"
)
conf = Conf()
| Conf |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/biases/boost_replay_id_bias.py | {
"start": 191,
"end": 952
} | class ____(Bias):
"""
Boosts at 100% sample rate all the traces that have a replay_id.
"""
def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]:
return [
{
"samplingValue": {"type": "sampleRate", "value": 1.0},
"type": "trace",
"condition": {
"op": "not",
"inner": {
"op": "eq",
"name": "trace.replay_id",
"value": None,
"options": {"ignoreCase": True},
},
},
"id": RESERVED_IDS[RuleType.BOOST_REPLAY_ID_RULE],
}
]
| BoostReplayIdBias |
python | sympy__sympy | sympy/polys/domains/complexfield.py | {
"start": 502,
"end": 6159
} | class ____(Field, CharacteristicZero, SimpleDomain):
"""Complex numbers up to the given precision. """
rep = 'CC'
is_ComplexField = is_CC = True
is_Exact = False
is_Numerical = True
has_assoc_Ring = False
has_assoc_Field = True
_default_precision = 53
@property
def has_default_precision(self):
return self.precision == self._default_precision
@property
def precision(self):
return self._context.prec
@property
def dps(self):
return self._context.dps
@property
def tolerance(self):
return self._tolerance
def __init__(self, prec=None, dps=None, tol=None):
# XXX: The tolerance parameter is ignored but is kept for backward
# compatibility for now.
context = MPContext()
if prec is None and dps is None:
context.prec = self._default_precision
elif dps is None:
context.prec = prec
elif prec is None:
context.dps = dps
else:
raise TypeError("Cannot set both prec and dps")
self._context = context
self._dtype = context.mpc
self.zero = self.dtype(0)
self.one = self.dtype(1)
# XXX: Neither of these is actually used anywhere.
self._max_denom = max(2**context.prec // 200, 99)
self._tolerance = self.one / self._max_denom
@property
def tp(self):
# XXX: Domain treats tp as an alias of dtype. Here we need two separate
# things: dtype is a callable to make/convert instances. We use tp with
# isinstance to check if an object is an instance of the domain
# already.
return self._dtype
def dtype(self, x, y=0):
# XXX: This is needed because mpmath does not recognise fmpz.
# It might be better to add conversion routines to mpmath and if that
# happens then this can be removed.
if isinstance(x, SYMPY_INTS):
x = int(x)
if isinstance(y, SYMPY_INTS):
y = int(y)
return self._dtype(x, y)
def __eq__(self, other):
return isinstance(other, ComplexField) and self.precision == other.precision
def __hash__(self):
return hash((self.__class__.__name__, self._dtype, self.precision))
def to_sympy(self, element):
"""Convert ``element`` to SymPy number. """
return Float(element.real, self.dps) + I*Float(element.imag, self.dps)
def from_sympy(self, expr):
"""Convert SymPy's number to ``dtype``. """
number = expr.evalf(n=self.dps)
real, imag = number.as_real_imag()
if real.is_Number and imag.is_Number:
return self.dtype(real, imag)
else:
raise CoercionFailed("expected complex number, got %s" % expr)
def from_ZZ(self, element, base):
return self.dtype(element)
def from_ZZ_gmpy(self, element, base):
return self.dtype(int(element))
def from_ZZ_python(self, element, base):
return self.dtype(element)
def from_QQ(self, element, base):
return self.dtype(int(element.numerator)) / int(element.denominator)
def from_QQ_python(self, element, base):
return self.dtype(element.numerator) / element.denominator
def from_QQ_gmpy(self, element, base):
return self.dtype(int(element.numerator)) / int(element.denominator)
def from_GaussianIntegerRing(self, element, base):
return self.dtype(int(element.x), int(element.y))
def from_GaussianRationalField(self, element, base):
x = element.x
y = element.y
return (self.dtype(int(x.numerator)) / int(x.denominator) +
self.dtype(0, int(y.numerator)) / int(y.denominator))
def from_AlgebraicField(self, element, base):
return self.from_sympy(base.to_sympy(element).evalf(self.dps))
def from_RealField(self, element, base):
return self.dtype(element)
def from_ComplexField(self, element, base):
return self.dtype(element)
def get_ring(self):
"""Returns a ring associated with ``self``. """
raise DomainError("there is no ring associated with %s" % self)
def get_exact(self):
"""Returns an exact domain associated with ``self``. """
return QQ_I
def is_negative(self, element):
"""Returns ``False`` for any ``ComplexElement``. """
return False
def is_positive(self, element):
"""Returns ``False`` for any ``ComplexElement``. """
return False
def is_nonnegative(self, element):
"""Returns ``False`` for any ``ComplexElement``. """
return False
def is_nonpositive(self, element):
"""Returns ``False`` for any ``ComplexElement``. """
return False
def gcd(self, a, b):
"""Returns GCD of ``a`` and ``b``. """
return self.one
def lcm(self, a, b):
"""Returns LCM of ``a`` and ``b``. """
return a*b
def almosteq(self, a, b, tolerance=None):
"""Check if ``a`` and ``b`` are almost equal. """
return self._context.almosteq(a, b, tolerance)
def is_square(self, a):
"""Returns ``True``. Every complex number has a complex square root."""
return True
def exsqrt(self, a):
r"""Returns the principal complex square root of ``a``.
Explanation
===========
The argument of the principal square root is always within
$(-\frac{\pi}{2}, \frac{\pi}{2}]$. The square root may be
slightly inaccurate due to floating point rounding error.
"""
return a ** 0.5
CC = ComplexField()
| ComplexField |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_return/RET501.py | {
"start": 413,
"end": 576
} | class ____:
@cached_property
def prop(self) -> None:
print("Property not found")
return None
import abc
import enum
import types
| BaseCache2 |
python | allegroai__clearml | clearml/hyperdatasets/data_view.py | {
"start": 978,
"end": 6288
} | class ____:
lucene_parser_warning_sent = False
@classmethod
def _validate_lucene(cls, lucene_query):
"""Validate the supplied Lucene query string using `luqum`.
Empty strings are considered valid. Non-empty values are parsed and raise a
`LuceneParseError` when the expression is malformed.
:param lucene_query: Lucene query string to validate
:return: None
"""
if not lucene_parser:
if not cls.lucene_parser_warning_sent:
logging.getLogger("DataView").warning(
"Could not validate lucene query because 'luqum' is not installed. "
"Run 'pip install luqum' to enable query validation"
)
cls.lucene_parser_warning_sent = True
return
if not lucene_query:
return
try:
lucene_parser.parse(lucene_query)
except LuceneParseError as e:
raise type(e)("Failed parsing lucene query '{}': {}".format(lucene_query, e))
def __init__(
self,
project_id="*", # ClearML datasets: collection id
dataset_id="*", # ClearML datasets: version id
version_id="*", # Alias for clarity; kept for symmetry
source_query=None,
frame_query=None,
weight=1.0,
filter_by_roi=None,
label_rules=None,
):
"""Construct a hyper-dataset query filter.
When concrete dataset/version IDs are supplied the constructor verifies their existence via
`HyperDatasetManagement`. Optional Lucene queries, ROI filtering, and sampling weights can be
provided to further refine the query.
:param project_id: Dataset collection identifier or wildcard
:param dataset_id: Dataset identifier or wildcard (legacy) used when version is omitted
:param version_id: Dataset version identifier; defaults to `dataset_id` when empty
:param source_query: Lucene query applied to frame source metadata
:param frame_query: Lucene query applied to frame metadata
:param weight: Relative sampling weight for this query
:param filter_by_roi: Optional ROI filtering strategy
:param label_rules: Optional label-rule dictionaries for ROI filtering
"""
HyperDatasetQuery._validate_lucene(source_query)
HyperDatasetQuery._validate_lucene(frame_query)
self._project_id = project_id
# Prefer explicit version_id if provided, else dataset_id acts as version id
self._dataset_id = dataset_id
self._version_id = version_id or dataset_id
self._validate_dataset_and_version()
self._source_query = source_query
self._frame_query = frame_query
self._weight = weight
self._filter_by_roi = filter_by_roi
self._label_rules = label_rules
@property
def dataset_id(self):
"""
Return the dataset identifier targeted by this query.
:return: Dataset ID string or wildcard marker
"""
return self._dataset_id
@property
def project_id(self):
"""
Return the dataset collection identifier associated with this query.
:return: Project ID string or wildcard marker
"""
return self._project_id
@property
def version_id(self):
"""
Return the dataset version identifier resolved for this query.
:return: Version ID string or wildcard marker
"""
return self._version_id
@property
def source_query(self):
"""
Return the Lucene query applied to frame source metadata.
:return: Lucene query string or None
"""
return self._source_query
@property
def frame_query(self):
"""
Return the Lucene query applied to frame-level metadata.
:return: Lucene query string or None
"""
return self._frame_query
@property
def weight(self):
"""
Return the relative sampling weight assigned to this query.
:return: Sampling weight as a float
"""
return self._weight
@property
def filter_by_roi(self):
"""
Return the ROI filtering strategy configured for this query.
:return: ROI filter identifier or None
"""
return self._filter_by_roi
@property
def label_rules(self):
"""
Return the label rule definitions used for ROI filtering.
:return: Sequence or mapping of label rules, or None
"""
return self._label_rules
def _validate_dataset_and_version(self):
"""Verify that referenced dataset and version identifiers exist on the backend."""
if self._dataset_id in (None, "*"):
return
version_id = self._version_id if self._version_id not in (None, "*") else None
exists = HyperDatasetManagement.exists(
dataset_id=self._dataset_id,
version_id=version_id,
)
if not exists:
raise ValueError(
"HyperDataset query references non-existent dataset/version: dataset_id={} version_id={}".format(
self._dataset_id, self._version_id
)
)
| HyperDatasetQuery |
python | PyCQA__pylint | tests/functional/m/mixin_class_rgx.py | {
"start": 658,
"end": 880
} | class ____:
"""Class that does not match the option pattern"""
def set_attribute(self):
"""Set an attribute outside of __init__"""
self.attr = 1 # [attribute-defined-outside-init]
| OutsideInitMixedin |
python | python-openxml__python-docx | src/docx/shared.py | {
"start": 11368,
"end": 11858
} | class ____:
"""Provides common services for document elements that occur below a part but may
occasionally require an ancestor object to provide a service, such as add or drop a
relationship.
Provides ``self._parent`` attribute to subclasses.
"""
def __init__(self, parent: t.ProvidesXmlPart):
self._parent = parent
@property
def part(self) -> XmlPart:
"""The package part containing this object."""
return self._parent.part
| Parented |
python | pydata__xarray | xarray/computation/arithmetic.py | {
"start": 4043,
"end": 4155
} | class ____(
SupportsArithmetic,
DataArrayGroupByOpsMixin,
):
__slots__ = ()
| DataArrayGroupbyArithmetic |
python | plotly__plotly.py | plotly/graph_objs/layout/polar/radialaxis/title/_font.py | {
"start": 235,
"end": 9946
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.polar.radialaxis.title"
_path_str = "layout.polar.radialaxis.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this axis' title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.polar.r
adialaxis.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.polar.radialaxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.radialaxis.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | gevent__gevent | src/gevent/tests/test__memleak.py | {
"start": 263,
"end": 1466
} | class ____(TestCase): # pragma: no cover
# pylint:disable=bare-except,no-member
def test(self):
refcounts = []
for _ in range(15):
try:
Timeout.start_new(0.01)
gevent.sleep(0.1)
self.fail('must raise Timeout')
except Timeout:
pass
refcounts.append(sys.gettotalrefcount())
# Refcounts may go down, but not up
# XXX: JAM: I think this may just be broken. Each time we add
# a new integer to our list of refcounts, we'll be
# creating a new reference. This makes sense when we see the list
# go up by one each iteration:
#
# AssertionError: 530631 not less than or equal to 530630
# : total refcount mismatch:
# [530381, 530618, 530619, 530620, 530621,
# 530622, 530623, 530624, 530625, 530626,
# 530627, 530628, 530629, 530630, 530631]
final = refcounts[-1]
previous = refcounts[-2]
self.assertLessEqual(
final, previous,
"total refcount mismatch: %s" % refcounts)
if __name__ == '__main__':
unittest.main()
| TestQueue |
python | google__pytype | pytype/state.py | {
"start": 805,
"end": 6458
} | class ____(utils.ContextWeakrefMixin):
"""Immutable state object, for attaching to opcodes."""
__slots__ = ["block_stack", "data_stack", "node", "exception", "why"]
def __init__(self, data_stack, block_stack, node, ctx, exception, why):
super().__init__(ctx)
self.data_stack = data_stack
self.block_stack = block_stack
self.node = node
self.exception = exception
self.why = why
@classmethod
def init(cls, node, ctx):
return FrameState((), (), node, ctx, False, None)
def __setattribute__(self):
raise AttributeError("States are immutable.")
def set_why(self, why):
return FrameState(
self.data_stack,
self.block_stack,
self.node,
self.ctx,
self.exception,
why,
)
def set_stack(self, new_stack):
return FrameState(
new_stack,
self.block_stack,
self.node,
self.ctx,
self.exception,
self.why,
)
def push(self, *values):
"""Push value(s) onto the value stack."""
return self.set_stack(self.data_stack + tuple(values))
def peek(self, n):
"""Get a value `n` entries down in the stack, without changing the stack."""
return self.data_stack[-n]
def top(self):
return self.data_stack[-1]
def topn(self, n):
if n > 0:
return self.data_stack[-n:]
else:
return ()
def pop(self):
"""Pop a value from the value stack."""
if not self.data_stack:
raise IndexError("Trying to pop from an empty stack")
value = self.data_stack[-1]
return self.set_stack(self.data_stack[:-1]), value
def pop_and_discard(self):
"""Pop a value from the value stack and discard it."""
return self.set_stack(self.data_stack[:-1])
def popn(self, n):
"""Return n values, ordered oldest-to-newest."""
if not n:
# Not an error: E.g. function calls with no parameters pop zero items
return self, ()
if len(self.data_stack) < n:
raise IndexError(
"Trying to pop %d values from stack of size %d"
% (n, len(self.data_stack))
)
values = self.data_stack[-n:]
return self.set_stack(self.data_stack[:-n]), values
def set_top(self, value):
"""Replace top of data stack with value."""
return self.set_stack(self.data_stack[:-1] + (value,))
def set_second(self, value):
"""Replace second element of data stack with value."""
return self.set_stack(self.data_stack[:-2] + (value, self.data_stack[-1]))
def rotn(self, n):
"""Rotate the top n values by one."""
if len(self.data_stack) < n:
raise IndexError(
"Trying to rotate %d values from stack of size %d"
% (n, len(self.data_stack))
)
top = self.data_stack[-1]
rot = self.data_stack[-n:-1]
return self.set_stack(self.data_stack[:-n] + (top,) + rot)
def swap(self, n):
"""Swap the top of the data stack with the value in position n."""
if len(self.data_stack) < n:
raise IndexError(
"Trying to swap value %d in stack of size %d"
% (n, len(self.data_stack))
)
top = self.data_stack[-1]
nth = self.data_stack[-n]
in_between = self.data_stack[(-n + 1) : -1]
rest = self.data_stack[:-n]
return self.set_stack(rest + (top,) + in_between + (nth,))
def push_block(self, block):
"""Push a block on to the block stack."""
return FrameState(
self.data_stack,
self.block_stack + (block,),
self.node,
self.ctx,
self.exception,
self.why,
)
def pop_block(self):
"""Pop a block from the block stack."""
block = self.block_stack[-1]
return (
FrameState(
self.data_stack,
self.block_stack[:-1],
self.node,
self.ctx,
self.exception,
self.why,
),
block,
)
def change_cfg_node(self, node: cfg.CFGNode) -> "FrameState":
if self.node is node:
return self
return FrameState(
self.data_stack,
self.block_stack,
node,
self.ctx,
self.exception,
self.why,
)
def connect_to_cfg_node(self, node):
self.node.ConnectTo(node)
return self.change_cfg_node(node)
def forward_cfg_node(self, new_name, condition=None):
"""Create a new CFG Node connected to the current cfg node.
Args:
new_name: A name for the new node.
condition: A cfg.Binding representing the condition that needs to be true
for this node to be reached.
Returns:
A new state which is the same as this state except for the node, which is
the new one.
"""
new_node = self.ctx.connect_new_cfg_node(self.node, new_name, condition)
return self.change_cfg_node(new_node)
def merge_into(self, other):
"""Merge with another state."""
if other is None:
return self
assert len(self.data_stack) == len(other.data_stack), (
self.data_stack,
other.data_stack,
)
both = list(zip(self.data_stack, other.data_stack))
if any(v1 is not v2 for v1, v2 in both):
for v, o in both:
o.PasteVariable(v, None)
if self.node is not other.node:
self.node.ConnectTo(other.node)
return FrameState(
other.data_stack,
self.block_stack,
other.node,
self.ctx,
self.exception,
self.why,
)
return self
def set_exception(self):
return FrameState(
self.data_stack,
self.block_stack,
self.ctx.connect_new_cfg_node(self.node, "SetException"),
self.ctx,
True,
self.why,
)
| FrameState |
python | gevent__gevent | src/gevent/testing/timing.py | {
"start": 1618,
"end": 3351
} | class ____(object):
_default_wait_timeout = SMALL_TICK
_default_delay_min_adj = SMALL_TICK_MIN_ADJ
_default_delay_max_adj = SMALL_TICK_MAX_ADJ
def wait(self, timeout):
raise NotImplementedError('override me in subclass')
def _check_delay_bounds(self, timeout, delay,
delay_min_adj=None,
delay_max_adj=None):
delay_min_adj = self._default_delay_min_adj if not delay_min_adj else delay_min_adj
delay_max_adj = self._default_delay_max_adj if not delay_max_adj else delay_max_adj
self.assertTimeWithinRange(delay,
timeout - delay_min_adj,
timeout + delay_max_adj)
def _wait_and_check(self, timeout=None):
if timeout is None:
timeout = self._default_wait_timeout
# gevent.timer instances have a 'seconds' attribute,
# otherwise it's the raw number
seconds = getattr(timeout, 'seconds', timeout)
gevent.get_hub().loop.update_now()
start = perf_counter()
try:
result = self.wait(timeout)
finally:
self._check_delay_bounds(seconds, perf_counter() - start,
self._default_delay_min_adj,
self._default_delay_max_adj)
return result
def test_outer_timeout_is_not_lost(self):
timeout = gevent.Timeout.start_new(SMALLEST_RELIABLE_DELAY, ref=False)
try:
with self.assertRaises(gevent.Timeout) as exc:
self.wait(timeout=1)
self.assertIs(exc.exception, timeout)
finally:
timeout.close()
| _DelayWaitMixin |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/namedTuple2.py | {
"start": 158,
"end": 954
} | class ____(NamedTuple):
entry_1: str
entry_2: int
nt1 = MyDataClass("yes", 1)
(a1, a2) = nt1
a1_1: str = a1
a2_1: int = a2
# These should generate an error because a1 and a2 are
# the wrong types.
a1_2: int = a1
a2_2: str = a2
b1 = nt1[0]
b2 = nt1[1]
b1_1: str = b1
b2_1: int = b2
# These should generate an error because a1 and a2 are
# the wrong types.
b1_2: int = b1
b2_2: str = b2
MyNT = NamedTuple("MyNT", [("hi", int), ("bye", str)])
nt2 = MyNT(3, "yo")
(c1, c2) = nt2
c1_2: int = c1
c2_2: str = c2
# These should generate an error because a1 and a2 are
# the wrong types.
c1_1: str = c1
c2_1: int = c2
d1 = nt2[0]
d2 = nt2[1]
d1_2: int = d1
d2_2: str = d2
# These should generate an error because a1 and a2 are
# the wrong types.
d1_1: str = d1
d2_1: int = d2
| MyDataClass |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 24835,
"end": 72951
} | class ____(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OverflowError, socket.if_indextoname, -1)
self.assertRaises(OverflowError, socket.if_indextoname, 2**1000)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
if hasattr(socket, 'if_nameindex'):
indices = dict(socket.if_nameindex())
for index in indices:
index2 = index + 2**32
if index2 not in indices:
with self.assertRaises((OverflowError, OSError)):
socket.if_indextoname(index2)
for index in 2**32-1, 2**64-1:
if index not in indices:
with self.assertRaises((OverflowError, OSError)):
socket.if_indextoname(index)
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
    @support.cpython_only
    def testNtoHErrors(self):
        """Range checking for htons/ntohs (16-bit) and htonl/ntohl (32-bit).

        Good values convert quietly; out-of-range values raise OverflowError;
        values that fit a C int but not 16 bits are deprecated for the short
        conversions (see the assertWarns checks below).
        """
        import _testcapi
        s_good_values = [0, 1, 2, 0xffff]
        l_good_values = s_good_values + [0xffffffff]
        l_bad_values = [-1, -2, 1<<32, 1<<1000]
        # Shorts additionally reject anything outside the C int range.
        s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                       _testcapi.INT_MAX + 1]
        s_deprecated_values = [1<<16, _testcapi.INT_MAX]
        for k in s_good_values:
            socket.ntohs(k)
            socket.htons(k)
        for k in l_good_values:
            socket.ntohl(k)
            socket.htonl(k)
        for k in s_bad_values:
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htons, k)
        for k in l_bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.htonl, k)
        for k in s_deprecated_values:
            self.assertWarns(DeprecationWarning, socket.ntohs, k)
            self.assertWarns(DeprecationWarning, socket.htons, k)
    def testGetServBy(self):
        """Round-trip service lookups via getservbyname()/getservbyport()."""
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except OSError:
                pass
        else:
            # No candidate service resolved: the services database is
            # unusable, so there is nothing meaningful to test.
            raise OSError
        # Try same call with optional protocol omitted
        # Issue #26936: Android getservbyname() was broken before API 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            port2 = socket.getservbyname(service)
            eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except OSError:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        # Issue #26936: Android getservbyport() is broken.
        if not support.is_android:
            eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)
    def testDefaultTimeout(self):
        """setdefaulttimeout() must propagate to newly created sockets."""
        # Testing default timeout
        # The default timeout should initially be None
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as s:
            self.assertEqual(s.gettimeout(), None)
        # Set the default timeout to 10, and see if it propagates
        # (socket_setdefaulttimeout restores the previous value on exit).
        with socket_setdefaulttimeout(10):
            self.assertEqual(socket.getdefaulttimeout(), 10)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), 10)
        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), None)
        # Check that setting it to an invalid value raises ValueError
        self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
        # Check that setting it to an invalid type raises TypeError
        self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv4toString(self):
        """inet_aton() and inet_pton(AF_INET) agree on IPv4 text parsing."""
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )
        self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
        self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not AIX:
            assertInvalid(f, '0.0.0.')
            assertInvalid(f, '300.0.0.0')
            assertInvalid(f, 'a.0.0.0')
            assertInvalid(f, '1.2.3.4.5')
            assertInvalid(f, '::1')
        # Same valid inputs through inet_pton(); its rejection of malformed
        # input is checked unconditionally (not subject to the AIX quirk).
        self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
        assertInvalid(g, '0.0.0.')
        assertInvalid(g, '300.0.0.0')
        assertInvalid(g, 'a.0.0.0')
        assertInvalid(g, '1.2.3.4.5')
        assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv4(self):
        """inet_ntoa() and inet_ntop(AF_INET) format 4-byte buffers alike."""
        from socket import inet_ntoa as f, inet_ntop, AF_INET
        g = lambda a: inet_ntop(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )
        self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
        self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
        # Anything other than exactly four bytes is rejected.
        assertInvalid(f, b'\x00' * 3)
        assertInvalid(f, b'\x00' * 5)
        assertInvalid(f, b'\x00' * 16)
        # bytearray input is accepted as well as bytes.
        self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
        self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
        assertInvalid(g, b'\x00' * 3)
        assertInvalid(g, b'\x00' * 5)
        assertInvalid(g, b'\x00' * 16)
        self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = socket_helper.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
    def testSendAfterClose(self):
        """send() on an already-closed timeout-mode socket raises OSError."""
        # testing send() after close() with timeout
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
        # The `with` block closed the socket; send() must now fail.
        self.assertRaises(OSError, sock.send, b"spam")
    def testCloseException(self):
        """close() on a socket whose fd was closed elsewhere raises EBADF."""
        sock = socket.socket()
        sock.bind((socket._LOCALHOST, 0))
        # Wrap the same fd in a second socket object and close it; this
        # closes the underlying descriptor, leaving `sock` with a stale fd.
        socket.socket(fileno=sock.fileno()).close()
        try:
            sock.close()
        except OSError as err:
            # Winsock apparently raises ENOTSOCK
            self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
        else:
            self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
    def test_sendall_interrupted(self):
        """sendall() interrupted by a signal, with no socket timeout set."""
        self.check_sendall_interrupted(False)
    def test_sendall_interrupted_with_timeout(self):
        """sendall() interrupted by a signal while a socket timeout is set."""
        self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
    def test_name_closed_socketio(self):
        """A closed socket file object reports fd -1 in its repr."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            fp = sock.makefile("rb")
            fp.close()
            self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
    def test_socket_close(self):
        """socket.close(fd) closes the descriptor and validates its argument."""
        sock = socket.socket()
        try:
            sock.bind((HOST, 0))
            socket.close(sock.fileno())
            # The underlying fd is gone, so socket operations must fail now.
            with self.assertRaises(OSError):
                sock.listen(1)
        finally:
            with self.assertRaises(OSError):
                # sock.close() fails with EBADF
                sock.close()
        # Argument validation: None is a TypeError, a bad fd an OSError.
        with self.assertRaises(TypeError):
            socket.close(None)
        with self.assertRaises(OSError):
            socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
    def test_pickle(self):
        """Sockets are unpicklable, but the AF_*/SOCK_* enums round-trip."""
        sock = socket.socket()
        with sock:
            # A live socket object must refuse pickling at every protocol.
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                self.assertRaises(TypeError, pickle.dumps, sock, protocol)
        # The enum constants, by contrast, survive a pickle round-trip.
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
            self.assertEqual(family, socket.AF_INET)
            type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
            self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Max OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
| GeneralModuleTests |
python | kamyu104__LeetCode-Solutions | Python/super-palindromes.py | {
"start": 44,
"end": 888
} | class ____(object):
    def superpalindromesInRange(self, L, R):
        """
        :type L: str
        :type R: str
        :rtype: int
        """
        # A super-palindrome is a palindrome whose square root is also a
        # palindrome.  Enumerate palindromic roots by mirroring every prefix
        # k < K, square each root, and count squares lying in [int(L), int(R)]
        # that are themselves palindromes.  K ~ R**(1/4): a prefix of d digits
        # yields a root of about 2d digits, hence a square of about 4d digits.
        def is_palindrome(k):
            return str(k) == str(k)[::-1]
        K = int((10**((len(R)+1)*0.25)))
        l, r = int(L), int(R)
        result = 0
        # count odd length
        # NOTE(review): xrange is Python 2 only; under Python 3 this would
        # need to be range().
        for k in xrange(K):
            s = str(k)
            t = s + s[-2::-1]  # mirror without repeating the middle digit
            v = int(t)**2
            if v > r:
                break
            if v >= l and is_palindrome(v):
                result += 1
        # count even length
        for k in xrange(K):
            s = str(k)
            t = s + s[::-1]  # mirror the whole prefix
            v = int(t)**2
            if v > r:
                break
            if v >= l and is_palindrome(v):
                result += 1
        return result
| Solution |
python | spyder-ide__spyder | spyder/plugins/profiler/widgets/main_widget.py | {
"start": 1503,
"end": 1671
} | class ____:
GotoDefinition = "goto_definition_action"
ShowCallees = "show_callees_action"
ShowCallers = "show_callers_action"
| ProfilerWidgetContextMenuActions |
python | huggingface__transformers | tests/models/cpmant/test_modeling_cpmant.py | {
"start": 4555,
"end": 6234
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CpmAntModel, CpmAntForCausalLM) if is_torch_available() else ()
# Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{"feature-extraction": CpmAntModel, "text-generation": CpmAntForCausalLM} if is_torch_available() else {}
)
test_missing_keys = False
test_mismatched_shapes = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = CpmAntModelTester(self)
self.config_tester = ConfigTester(self, config_class=CpmAntConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
unittest.skip(reason="CPMAnt doesn't support input_embeds.")(self.test_inputs_embeds)
def test_retain_grad_hidden_states_attentions(self):
unittest.skip(
"CPMAnt doesn't support retain grad in hidden_states or attentions, because prompt management will peel off the output.hidden_states from graph.\
So is attentions. We strongly recommend you use loss to tune model."
)(self.test_retain_grad_hidden_states_attentions)
def test_cpmant_model(self):
config, inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_cpmant_model(config, inputs)
def test_cpmant_lm_head_model(self):
config, inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(config, inputs)
@require_torch
| CpmAntModelTest |
python | pytorch__pytorch | test/test_numpy_interop.py | {
"start": 560,
"end": 26713
} | class ____(TestCase):
# Note: the warning this tests for only appears once per program, so
# other instances of this warning should be addressed to avoid
# the tests depending on the order in which they're run.
    @onlyCPU
    def test_numpy_non_writeable(self, device):
        """torch.from_numpy() on a non-writeable NumPy array emits a warning."""
        arr = np.zeros(5)
        arr.flags["WRITEABLE"] = False
        # NOTE: per the class-level comment, this warning appears only once
        # per program, so test ordering matters for this check.
        self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))
    @onlyCPU
    def test_numpy_unresizable(self, device) -> None:
        """Buffers shared across the NumPy/torch boundary cannot be resized."""
        x = np.zeros((2, 2))
        # Keep a tensor view alive so the array's buffer is referenced.
        y = torch.from_numpy(x) # noqa: F841
        with self.assertRaises(ValueError):
            x.resize((5, 5))
        # And in the other direction: exporting via .numpy() pins the
        # tensor's storage, so neither side may resize afterwards.
        z = torch.randn(5, 5)
        w = z.numpy()
        with self.assertRaises(RuntimeError):
            z.resize_(10, 10)
        with self.assertRaises(ValueError):
            w.resize((10, 10))
@onlyCPU
def test_to_numpy(self, device) -> None:
def get_castable_tensor(shape, dtype):
if dtype.is_floating_point:
dtype_info = torch.finfo(dtype)
# can't directly use min and max, because for double, max - min
# is greater than double range and sampling always gives inf.
low = max(dtype_info.min, -1e10)
high = min(dtype_info.max, 1e10)
t = torch.empty(shape, dtype=torch.float64).uniform_(low, high)
else:
# can't directly use min and max, because for int64_t, max - min
# is greater than int64_t range and triggers UB.
low = max(torch.iinfo(dtype).min, int(-1e10))
high = min(torch.iinfo(dtype).max, int(1e10))
t = torch.empty(shape, dtype=torch.int64).random_(low, high)
return t.to(dtype)
dtypes = [
torch.uint8,
torch.int8,
torch.short,
torch.int,
torch.half,
torch.float,
torch.double,
torch.long,
]
for dtp in dtypes:
# 1D
sz = 10
x = get_castable_tensor(sz, dtp)
y = x.numpy()
for i in range(sz):
self.assertEqual(x[i], y[i])
# 1D > 0 storage offset
xm = get_castable_tensor(sz * 2, dtp)
x = xm.narrow(0, sz - 1, sz)
self.assertTrue(x.storage_offset() > 0)
y = x.numpy()
for i in range(sz):
self.assertEqual(x[i], y[i])
def check2d(x, y):
for i in range(sz1):
for j in range(sz2):
self.assertEqual(x[i][j], y[i][j])
# empty
x = torch.tensor([]).to(dtp)
y = x.numpy()
self.assertEqual(y.size, 0)
# contiguous 2D
sz1 = 3
sz2 = 5
x = get_castable_tensor((sz1, sz2), dtp)
y = x.numpy()
check2d(x, y)
self.assertTrue(y.flags["C_CONTIGUOUS"])
# with storage offset
xm = get_castable_tensor((sz1 * 2, sz2), dtp)
x = xm.narrow(0, sz1 - 1, sz1)
y = x.numpy()
self.assertTrue(x.storage_offset() > 0)
check2d(x, y)
self.assertTrue(y.flags["C_CONTIGUOUS"])
# non-contiguous 2D
x = get_castable_tensor((sz2, sz1), dtp).t()
y = x.numpy()
check2d(x, y)
self.assertFalse(y.flags["C_CONTIGUOUS"])
# with storage offset
xm = get_castable_tensor((sz2 * 2, sz1), dtp)
x = xm.narrow(0, sz2 - 1, sz2).t()
y = x.numpy()
self.assertTrue(x.storage_offset() > 0)
check2d(x, y)
# non-contiguous 2D with holes
xm = get_castable_tensor((sz2 * 2, sz1 * 2), dtp)
x = xm.narrow(0, sz2 - 1, sz2).narrow(1, sz1 - 1, sz1).t()
y = x.numpy()
self.assertTrue(x.storage_offset() > 0)
check2d(x, y)
if dtp != torch.half:
# check writeable
x = get_castable_tensor((3, 4), dtp)
y = x.numpy()
self.assertTrue(y.flags.writeable)
y[0][1] = 3
self.assertTrue(x[0][1] == 3)
y = x.t().numpy()
self.assertTrue(y.flags.writeable)
y[0][1] = 3
self.assertTrue(x[0][1] == 3)
def test_to_numpy_bool(self, device) -> None:
x = torch.tensor([True, False], dtype=torch.bool)
self.assertEqual(x.dtype, torch.bool)
y = x.numpy()
self.assertEqual(y.dtype, np.bool_)
for i in range(len(x)):
self.assertEqual(x[i], y[i])
x = torch.tensor([True], dtype=torch.bool)
self.assertEqual(x.dtype, torch.bool)
y = x.numpy()
self.assertEqual(y.dtype, np.bool_)
self.assertEqual(x[0], y[0])
@skipIfTorchDynamo(
"can't check if value is ZeroTensor since _is_zerotensor returns a bool and not a TensorVariable"
)
def test_to_numpy_zero_tensor(self, device) -> None:
dtypes = [
torch.uint8,
torch.int8,
torch.short,
torch.int,
torch.half,
torch.float,
torch.double,
torch.long,
torch.bool,
]
for dtype in dtypes:
x = torch._efficientzerotensor((10), dtype=dtype)
self.assertRaises(RuntimeError, lambda: x.numpy())
y = x.numpy(force=True)
for i in range(10):
self.assertEqual(y[i], 0)
@skipIfTorchDynamo("conj bit not implemented in TensorVariable yet")
def test_to_numpy_force_argument(self, device) -> None:
for force in [False, True]:
for requires_grad in [False, True]:
for sparse in [False, True]:
for conj in [False, True]:
data = [[1 + 2j, -2 + 3j], [-1 - 2j, 3 - 2j]]
x = torch.tensor(
data, requires_grad=requires_grad, device=device
)
y = x
if sparse:
if requires_grad:
continue
x = x.to_sparse()
if conj:
x = x.conj()
y = x.resolve_conj()
expect_error = (
requires_grad or sparse or conj or device != "cpu"
)
error_msg = r"Use (t|T)ensor\..*(\.numpy\(\))?"
if not force and expect_error:
self.assertRaisesRegex(
(RuntimeError, TypeError), error_msg, lambda: x.numpy()
)
self.assertRaisesRegex(
(RuntimeError, TypeError),
error_msg,
lambda: x.numpy(force=False),
)
elif force and sparse:
self.assertRaisesRegex(
TypeError, error_msg, lambda: x.numpy(force=True)
)
else:
self.assertEqual(x.numpy(force=force), y)
def test_from_numpy(self, device) -> None:
dtypes = [
np.double,
np.float64,
np.float16,
np.complex64,
np.complex128,
np.int64,
np.int32,
np.int16,
np.int8,
np.uint8,
np.longlong,
np.bool_,
]
complex_dtypes = [
np.complex64,
np.complex128,
]
for dtype in dtypes:
array = np.array([1, 2, 3, 4], dtype=dtype)
tensor_from_array = torch.from_numpy(array)
# TODO: change to tensor equality check once HalfTensor
# implements `==`
for i in range(len(array)):
self.assertEqual(tensor_from_array[i], array[i])
# ufunc 'remainder' not supported for complex dtypes
if dtype not in complex_dtypes:
# This is a special test case for Windows
# https://github.com/pytorch/pytorch/issues/22615
array2 = array % 2
tensor_from_array2 = torch.from_numpy(array2)
for i in range(len(array2)):
self.assertEqual(tensor_from_array2[i], array2[i])
# Test unsupported type
array = np.array(["foo", "bar"], dtype=np.dtype(np.str_))
with self.assertRaises(TypeError):
tensor_from_array = torch.from_numpy(array)
# check storage offset
x = np.linspace(1, 125, 125)
x.shape = (5, 5, 5)
x = x[1]
expected = torch.arange(1, 126, dtype=torch.float64).view(5, 5, 5)[1]
self.assertEqual(torch.from_numpy(x), expected)
# check noncontiguous
x = np.linspace(1, 25, 25)
x.shape = (5, 5)
expected = torch.arange(1, 26, dtype=torch.float64).view(5, 5).t()
self.assertEqual(torch.from_numpy(x.T), expected)
# check noncontiguous with holes
x = np.linspace(1, 125, 125)
x.shape = (5, 5, 5)
x = x[:, 1]
expected = torch.arange(1, 126, dtype=torch.float64).view(5, 5, 5)[:, 1]
self.assertEqual(torch.from_numpy(x), expected)
# check zero dimensional
x = np.zeros((0, 2))
self.assertEqual(torch.from_numpy(x).shape, (0, 2))
x = np.zeros((2, 0))
self.assertEqual(torch.from_numpy(x).shape, (2, 0))
# check ill-sized strides raise exception
x = np.array([3.0, 5.0, 8.0])
x.strides = (3,)
self.assertRaises(ValueError, lambda: torch.from_numpy(x))
@skipIfTorchDynamo("No need to test invalid dtypes that should fail by design.")
def test_from_numpy_no_leak_on_invalid_dtype(self):
# This used to leak memory as the `from_numpy` call raised an exception and didn't decref the temporary
# object. See https://github.com/pytorch/pytorch/issues/121138
x = np.array(b"value")
initial_refcount = sys.getrefcount(x)
for _ in range(1000):
try:
torch.from_numpy(x)
except TypeError:
pass
final_refcount = sys.getrefcount(x)
self.assertEqual(
final_refcount,
initial_refcount,
f"Memory leak detected: refcount increased from {initial_refcount} to {final_refcount}",
)
@skipIfTorchDynamo("No need to test invalid dtypes that should fail by design.")
@onlyCPU
def test_from_numpy_zero_element_type(self):
# This tests that dtype check happens before strides check
# which results in div-by-zero on-x86
x = np.ndarray((3, 3), dtype=str)
self.assertRaises(TypeError, lambda: torch.from_numpy(x))
@skipMeta
def test_from_list_of_ndarray_warning(self, device):
warning_msg = (
r"Creating a tensor from a list of numpy.ndarrays is extremely slow"
)
with self.assertWarnsOnceRegex(UserWarning, warning_msg):
torch.tensor([np.array([0]), np.array([1])], device=device)
def test_ctor_with_invalid_numpy_array_sequence(self, device):
# Invalid list of numpy array
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor(
[np.random.random(size=(3, 3)), np.random.random(size=(3, 0))],
device=device,
)
# Invalid list of list of numpy array
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor(
[[np.random.random(size=(3, 3)), np.random.random(size=(3, 2))]],
device=device,
)
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor(
[
[np.random.random(size=(3, 3)), np.random.random(size=(3, 3))],
[np.random.random(size=(3, 3)), np.random.random(size=(3, 2))],
],
device=device,
)
# expected shape is `[1, 2, 3]`, hence we try to iterate over 0-D array
# leading to type error : not a sequence.
with self.assertRaisesRegex(TypeError, "not a sequence"):
torch.tensor(
[[np.random.random(size=(3)), np.random.random()]], device=device
)
# list of list or numpy array.
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor([[1, 2, 3], np.random.random(size=(2,))], device=device)
@onlyCPU
def test_ctor_with_numpy_scalar_ctor(self, device) -> None:
dtypes = [
np.double,
np.float64,
np.float16,
np.int64,
np.int32,
np.int16,
np.uint8,
np.bool_,
]
for dtype in dtypes:
self.assertEqual(dtype(42), torch.tensor(dtype(42)).item())
@onlyCPU
def test_numpy_index(self, device):
i = np.array([0, 1, 2], dtype=np.int32)
x = torch.randn(5, 5)
for idx in i:
self.assertFalse(isinstance(idx, int))
self.assertEqual(x[idx], x[int(idx)])
@onlyCPU
def test_numpy_index_multi(self, device):
for dim_sz in [2, 8, 16, 32]:
i = np.zeros((dim_sz, dim_sz, dim_sz), dtype=np.int32)
i[: dim_sz // 2, :, :] = 1
x = torch.randn(dim_sz, dim_sz, dim_sz)
self.assertTrue(x[i == 1].numel() == np.sum(i))
@onlyCPU
def test_numpy_array_interface(self, device):
types = [
torch.DoubleTensor,
torch.FloatTensor,
torch.HalfTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.ByteTensor,
]
dtypes = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.uint8,
]
for tp, dtype in zip(types, dtypes):
# Only concrete class can be given where "Type[number[_64Bit]]" is expected
if np.dtype(dtype).kind == "u": # type: ignore[misc]
# .type expects a XxxTensor, which have no type hints on
# purpose, so ignore during mypy type checking
x = torch.tensor([1, 2, 3, 4]).type(tp) # type: ignore[call-overload]
array = np.array([1, 2, 3, 4], dtype=dtype)
else:
x = torch.tensor([1, -2, 3, -4]).type(tp) # type: ignore[call-overload]
array = np.array([1, -2, 3, -4], dtype=dtype)
# Test __array__ w/o dtype argument
asarray = np.asarray(x)
self.assertIsInstance(asarray, np.ndarray)
self.assertEqual(asarray.dtype, dtype)
for i in range(len(x)):
self.assertEqual(asarray[i], x[i])
# Test __array_wrap__, same dtype
abs_x = np.abs(x)
abs_array = np.abs(array)
self.assertIsInstance(abs_x, tp)
for i in range(len(x)):
self.assertEqual(abs_x[i], abs_array[i])
# Test __array__ with dtype argument
for dtype in dtypes:
x = torch.IntTensor([1, -2, 3, -4])
asarray = np.asarray(x, dtype=dtype)
self.assertEqual(asarray.dtype, dtype)
# Only concrete class can be given where "Type[number[_64Bit]]" is expected
if np.dtype(dtype).kind == "u": # type: ignore[misc]
wrapped_x = np.array([1, -2, 3, -4]).astype(dtype)
for i in range(len(x)):
self.assertEqual(asarray[i], wrapped_x[i])
else:
for i in range(len(x)):
self.assertEqual(asarray[i], x[i])
# Test some math functions with float types
float_types = [torch.DoubleTensor, torch.FloatTensor]
float_dtypes = [np.float64, np.float32]
for tp, dtype in zip(float_types, float_dtypes):
x = torch.tensor([1, 2, 3, 4]).type(tp) # type: ignore[call-overload]
array = np.array([1, 2, 3, 4], dtype=dtype)
for func in ["sin", "sqrt", "ceil"]:
ufunc = getattr(np, func)
res_x = ufunc(x)
res_array = ufunc(array)
self.assertIsInstance(res_x, tp)
for i in range(len(x)):
self.assertEqual(res_x[i], res_array[i])
# Test functions with boolean return value
for tp, dtype in zip(types, dtypes):
x = torch.tensor([1, 2, 3, 4]).type(tp) # type: ignore[call-overload]
array = np.array([1, 2, 3, 4], dtype=dtype)
geq2_x = np.greater_equal(x, 2)
geq2_array = np.greater_equal(array, 2).astype("uint8")
self.assertIsInstance(geq2_x, torch.ByteTensor)
for i in range(len(x)):
self.assertEqual(geq2_x[i], geq2_array[i])
@onlyCPU
def test_multiplication_numpy_scalar(self, device) -> None:
for np_dtype in [
np.float32,
np.float64,
np.int32,
np.int64,
np.int16,
np.uint8,
]:
for t_dtype in [torch.float, torch.double]:
# mypy raises an error when np.floatXY(2.0) is called
# even though this is valid code
np_sc = np_dtype(2.0) # type: ignore[abstract, arg-type]
t = torch.ones(2, requires_grad=True, dtype=t_dtype)
r1 = t * np_sc
self.assertIsInstance(r1, torch.Tensor)
self.assertTrue(r1.dtype == t_dtype)
self.assertTrue(r1.requires_grad)
r2 = np_sc * t
self.assertIsInstance(r2, torch.Tensor)
self.assertTrue(r2.dtype == t_dtype)
self.assertTrue(r2.requires_grad)
@onlyCPU
@skipIfTorchDynamo()
def test_parse_numpy_int_overflow(self, device):
# assertRaises uses a try-except which dynamo has issues with
# Only concrete class can be given where "Type[number[_64Bit]]" is expected
if np.__version__ > "2":
self.assertRaisesRegex(
OverflowError,
"out of bounds",
lambda: torch.mean(torch.randn(1, 1), np.uint64(-1)),
) # type: ignore[call-overload]
else:
self.assertRaisesRegex(
ValueError,
"(Overflow|an integer is required)",
lambda: torch.mean(torch.randn(1, 1), np.uint64(-1)),
) # type: ignore[call-overload]
@onlyCPU
def test_parse_numpy_int(self, device):
# https://github.com/pytorch/pytorch/issues/29252
for nptype in [np.int16, np.int8, np.uint8, np.int32, np.int64]:
scalar = 3
np_arr = np.array([scalar], dtype=nptype)
np_val = np_arr[0]
# np integral type can be treated as a python int in native functions with
# int parameters:
self.assertEqual(torch.ones(5).diag(scalar), torch.ones(5).diag(np_val))
self.assertEqual(
torch.ones([2, 2, 2, 2]).mean(scalar),
torch.ones([2, 2, 2, 2]).mean(np_val),
)
# numpy integral type parses like a python int in custom python bindings:
self.assertEqual(torch.Storage(np_val).size(), scalar) # type: ignore[attr-defined]
tensor = torch.tensor([2], dtype=torch.int)
tensor[0] = np_val
self.assertEqual(tensor[0], np_val)
# Original reported issue, np integral type parses to the correct
# PyTorch integral type when passed for a `Scalar` parameter in
# arithmetic operations:
t = torch.from_numpy(np_arr)
self.assertEqual((t + np_val).dtype, t.dtype)
self.assertEqual((np_val + t).dtype, t.dtype)
def test_has_storage_numpy(self, device):
for dtype in [np.float32, np.float64, np.int64, np.int32, np.int16, np.uint8]:
arr = np.array([1], dtype=dtype)
self.assertIsNotNone(
torch.tensor(arr, device=device, dtype=torch.float32).storage()
)
self.assertIsNotNone(
torch.tensor(arr, device=device, dtype=torch.double).storage()
)
self.assertIsNotNone(
torch.tensor(arr, device=device, dtype=torch.int).storage()
)
self.assertIsNotNone(
torch.tensor(arr, device=device, dtype=torch.long).storage()
)
self.assertIsNotNone(
torch.tensor(arr, device=device, dtype=torch.uint8).storage()
)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_numpy_scalar_cmp(self, device, dtype):
if dtype.is_complex:
tensors = (
torch.tensor(complex(1, 3), dtype=dtype, device=device),
torch.tensor([complex(1, 3), 0, 2j], dtype=dtype, device=device),
torch.tensor(
[[complex(3, 1), 0], [-1j, 5]], dtype=dtype, device=device
),
)
else:
tensors = (
torch.tensor(3, dtype=dtype, device=device),
torch.tensor([1, 0, -3], dtype=dtype, device=device),
torch.tensor([[3, 0, -1], [3, 5, 4]], dtype=dtype, device=device),
)
for tensor in tensors:
if dtype == torch.bfloat16:
with self.assertRaises(TypeError):
np_array = tensor.cpu().numpy()
continue
np_array = tensor.cpu().numpy()
for t, a in product(
(tensor.flatten()[0], tensor.flatten()[0].item()),
(np_array.flatten()[0], np_array.flatten()[0].item()),
):
self.assertEqual(t, a)
if (
dtype == torch.complex64
and torch.is_tensor(t)
and type(a) is np.complex64
):
# TODO: Imaginary part is dropped in this case. Need fix.
# https://github.com/pytorch/pytorch/issues/43579
self.assertFalse(t == a)
else:
self.assertTrue(t == a)
@onlyCPU
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test___eq__(self, device, dtype):
a = make_tensor((5, 7), dtype=dtype, device=device, low=-9, high=9)
b = a.detach().clone()
b_np = b.numpy()
# Check all elements equal
res_check = torch.ones_like(a, dtype=torch.bool)
self.assertEqual(a == b_np, res_check)
self.assertEqual(b_np == a, res_check)
# Check one element unequal
if dtype == torch.bool:
b[1][3] = not b[1][3]
else:
b[1][3] += 1
res_check[1][3] = False
self.assertEqual(a == b_np, res_check)
self.assertEqual(b_np == a, res_check)
# Check random elements unequal
rand = torch.randint(0, 2, a.shape, dtype=torch.bool)
res_check = rand.logical_not()
b.copy_(a)
if dtype == torch.bool:
b[rand] = b[rand].logical_not()
else:
b[rand] += 1
self.assertEqual(a == b_np, res_check)
self.assertEqual(b_np == a, res_check)
# Check all elements unequal
if dtype == torch.bool:
b.copy_(a.logical_not())
else:
b.copy_(a + 1)
res_check.fill_(False)
self.assertEqual(a == b_np, res_check)
self.assertEqual(b_np == a, res_check)
@onlyCPU
def test_empty_tensors_interop(self, device):
x = torch.rand((), dtype=torch.float16)
y = torch.tensor(np.random.rand(0), dtype=torch.float16)
# Same can be achieved by running
# y = torch.empty_strided((0,), (0,), dtype=torch.float16)
# Regression test for https://github.com/pytorch/pytorch/issues/115068
self.assertEqual(torch.true_divide(x, y).shape, y.shape)
# Regression test for https://github.com/pytorch/pytorch/issues/115066
self.assertEqual(torch.mul(x, y).shape, y.shape)
# Regression test for https://github.com/pytorch/pytorch/issues/113037
self.assertEqual(torch.div(x, y, rounding_mode="floor").shape, y.shape)
def test_ndarray_astype_object_graph_break(self):
@torch.compile(backend="eager", fullgraph=True)
def f(xs):
xs.astype("O")
xs = np.array([1, 2])
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported, "ndarray.astype\\(object\\)"
):
f(xs)
def test_ndarray_astype_object_graph_break_2(self):
@torch.compile(backend="eager", fullgraph=True)
def f(xs):
xs.astype(object)
xs = np.array([1, 2])
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported, "ndarray.astype\\(object\\)"
):
f(xs)
def test_copy_mode(self):
def f(x):
return np.array(x, copy=np._CopyMode.IF_NEEDED)
opt_f = torch.compile(backend="eager", fullgraph=True)(f)
x = np.array([1, 2, 3])
# Should run without throwing an exception
y = opt_f(x)
self.assertEqual(y, f(x))
instantiate_device_type_tests(TestNumPyInterop, globals())
if __name__ == "__main__":
run_tests()
| TestNumPyInterop |
python | cython__cython | Cython/TestUtils.py | {
"start": 4444,
"end": 7089
} | class ____(CythonTest):
"""
Utility base class for transform unit tests. It is based around constructing
test trees (either explicitly or by parsing a Cython code string); running
the transform, serialize it using a customized Cython serializer (with
special markup for nodes that cannot be represented in Cython),
and do a string-comparison line-by-line of the result.
To create a test case:
- Call run_pipeline. The pipeline should at least contain the transform you
are testing; pyx should be either a string (passed to the parser to
create a post-parse tree) or a node representing input to pipeline.
The result will be a transformed result.
- Check that the tree is correct. If wanted, assertCode can be used, which
takes a code string as expected, and a ModuleNode in result_tree
(it serializes the ModuleNode to a string and compares line-by-line).
All code strings are first stripped for whitespace lines and then common
indentation.
Plans: One could have a pxd dictionary parameter to run_pipeline.
"""
def run_pipeline(self, pipeline, pyx, pxds=None):
if pxds is None:
pxds = {}
tree = self.fragment(pyx, pxds).root
# Run pipeline
for T in pipeline:
tree = T(tree)
return tree
# For the test C code validation, we have to take care that the test directives (and thus
# the match strings) do not just appear in (multiline) C code comments containing the original
# Cython source code. Thus, we discard the comments before matching.
# This seems a prime case for re.VERBOSE, but it seems to match some of the whitespace.
_strip_c_comments = partial(re.compile(
re.sub(r'\s+', '', r'''
/[*] (
(?: [^*\n] | [*][^/] )*
[\n]
(?: [^*] | [*][^/] )*
) [*]/
''')
).sub, '')
_strip_cython_code_from_html = partial(re.compile(
re.sub(r'\s\s+', '', r'''
(?:
<pre class=["'][^"']*cython\s+line[^"']*["']\s*>
(?:[^<]|<(?!/pre))+
</pre>
)|(?:
<style[^>]*>
(?:[^<]|<(?!/style))+
</style>
)
''')
).sub, '')
def _parse_pattern(pattern):
start = end = None
if pattern.startswith('/'):
start, pattern = re.split(r"(?<!\\)/", pattern[1:], maxsplit=1)
pattern = pattern.strip()
if pattern.startswith(':'):
pattern = pattern[1:].strip()
if pattern.startswith("/"):
end, pattern = re.split(r"(?<!\\)/", pattern[1:], maxsplit=1)
pattern = pattern.strip()
return start, end, pattern
| TransformTest |
python | getsentry__sentry | src/sentry/api/serializers/models/grouprelease.py | {
"start": 1131,
"end": 3043
} | class ____(GroupReleaseSerializer):
STATS_PERIODS = {
"24h": StatsPeriod(24, timedelta(hours=1)),
"30d": StatsPeriod(30, timedelta(hours=24)),
}
def __init__(self, since=None, until=None):
self.since = since
self.until = until
def get_attrs(self, item_list, user, **kwargs):
attrs = super().get_attrs(item_list, user)
tenant_ids = (
{
"organization_id": Project.objects.get_from_cache(
id=item_list[0].project_id
).organization_id
}
if item_list
else None
)
items: dict[str, list[str]] = {}
for item in item_list:
items.setdefault(item.group_id, []).append(item.id)
attrs[item]["stats"] = {}
for key, (segments, interval) in self.STATS_PERIODS.items():
until = self.until or timezone.now()
since = self.since or until - (segments * interval)
try:
stats = tsdb.backend.get_frequency_series(
model=TSDBModel.frequent_releases_by_group,
items=items,
start=since,
end=until,
rollup=int(interval.total_seconds()),
tenant_ids=tenant_ids,
)
except NotImplementedError:
# TODO(dcramer): probably should log this, but not worth
# erring out
stats = {}
for item in item_list:
attrs[item]["stats"][key] = [
(k, v[item.id]) for k, v in stats.get(item.group_id, {})
]
return attrs
def serialize(self, obj, attrs, user, **kwargs):
result = super().serialize(obj, attrs, user)
result["stats"] = attrs["stats"]
return result
| GroupReleaseWithStatsSerializer |
python | pytorch__pytorch | torch/_dynamo/pgo.py | {
"start": 6318,
"end": 6777
} | class ____:
automatic_dynamic: defaultdict[str, FrameStateSizeEntry] = dataclasses.field(
# pyrefly: ignore [unbound-name]
default_factory=lambda: defaultdict(FrameStateSizeEntry)
)
_INIT_CODE_STATE: Optional[defaultdict[CodeId, CodeState]] = None
_CODE_STATE: Optional[defaultdict[CodeId, CodeState]] = None
_LOGGED_DYNAMIC_ALLOWLIST: bool = False
_KNOWN_DYNAMIC_SOURCES: set[str] = set()
@dataclasses.dataclass(frozen=True)
| CodeState |
python | pyca__cryptography | src/cryptography/x509/general_name.py | {
"start": 586,
"end": 766
} | class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def value(self) -> typing.Any:
"""
Return the value of the object
"""
| GeneralName |
python | ansible__ansible | test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py | {
"start": 352,
"end": 726
} | class ____(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'usercallback'
def __init__(self):
super(CallbackModule, self).__init__()
self._display.display("loaded usercallback from collection, yay")
def v2_runner_on_ok(self, result):
self._display.display(self.get_option("ok_msg"))
| CallbackModule |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator.py | {
"start": 33977,
"end": 42521
} | class ____(object):
"""Handles worker preemptions."""
def __init__(self, server_def, cluster):
self._server_def = server_def
self._cluster = cluster
self._cluster_update_lock = threading.Lock()
self._cluster_due_for_update_or_finish = threading.Event()
self._worker_up_cond = threading.Condition(self._cluster_update_lock)
self._error_from_recovery = None
self._should_preemption_thread_run = True
self._preemption_handler_thread = threading.Thread(
target=self._preemption_handler,
name="WorkerPreemptionHandler",
daemon=True)
self._preemption_handler_thread.start()
def stop(self):
"""Ensure the worker preemption thread is closed."""
self._should_preemption_thread_run = False
with self._cluster_update_lock:
self._cluster_due_for_update_or_finish.set()
# TODO(yuefengz): The preemption handler thread shouldn't be terminated
# asynchronously since it touches eager context which is a process-wide
# singleton. The problem is in OSS unit tests will time out.
def _validate_preemption_failure(self, e):
"""Validates that the given exception represents worker preemption."""
# Only categorize the failure as a worker preemption if the cancellation
# manager did not attempt to cancel the blocking operations.
if _is_worker_failure(e) and (
not self._cluster.closure_queue._cancellation_mgr.is_cancelled): # pylint: disable=protected-access
metric_utils.monitor_increment_counter("worker_failures")
return
raise e
@contextlib.contextmanager
def wait_on_failure(self,
on_failure_fn=None,
on_transient_failure_fn=None,
on_recovery_fn=None,
worker_device_name="(unknown)"):
"""Catches worker preemption error and wait until failed workers are back.
Args:
on_failure_fn: an optional function to run if preemption happens.
on_transient_failure_fn: an optional function to run if transient failure
happens.
on_recovery_fn: an optional function to run when a worker is recovered
from preemption.
worker_device_name: the device name of the worker instance that is passing
through the failure.
Yields:
None.
"""
assert self._should_preemption_thread_run
try:
yield
except (errors.OpError, ClosureInputError,
ClosureAbortedError, TypeError) as e:
# If the error is due to temporary connectivity issues between worker and
# ps, put back closure, ignore error and do not mark worker as failure.
if self._cluster._record_and_ignore_transient_ps_failure(e): # pylint: disable=protected-access
logging.error(
"Remote function on worker %s failed with %r:%s\n"
"It is treated as a transient connectivity failure for now.",
worker_device_name, e, e)
if on_transient_failure_fn:
on_transient_failure_fn()
return
# If the error is due to temporary connectivity issues that cause the
# server-side RPCs to be cancelled, TF might not abort the step and the
# closure might timeout. The coordinator ignores certain amount of such
# failures without marking worker as failure.
if self._cluster._record_and_ignore_transient_timeouts(e): # pylint: disable=protected-access
logging.error(
"Remote function on worker %s failed with %r:%s\n"
"This derived error is ignored and not reported to users.",
worker_device_name, e, e)
if on_transient_failure_fn:
on_transient_failure_fn()
return
# Ignoring derived CancelledErrors to tolerate transient failures in
# PS-worker communication, which initially exposed as an UnavailableError
# and then lead to sub-function cancellation, subsequently getting
# reported from worker to chief as CancelledError.
# We do not mark either worker or PS as failed due to only CancelledError.
# If there are real (non-transient) failures, they must also be reported
# as other errors (UnavailableError most likely) in closure executions.
if isinstance(e, errors.CancelledError) and "/job:" in str(e):
logging.error(
"Remote function on worker %s failed with %r:%s\n"
"This derived error is ignored and not reported to users.",
worker_device_name, e, e)
if on_transient_failure_fn:
on_transient_failure_fn()
return
# This reraises the error, if it's not considered recoverable; otherwise,
# the following failure recovery logic run. At this time, only worker
# unavailability is recoverable. PS unavailability as well as other
# errors in the user function is not recoverable.
self._validate_preemption_failure(e)
logging.error("Worker %s failed with %r:%s", worker_device_name, e, e)
if on_failure_fn:
on_failure_fn(e)
with self._cluster_update_lock:
self._cluster_due_for_update_or_finish.set()
self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)
if self._error_from_recovery:
# TODO(yuefengz): there is only one worker that will get this error.
# Ideally we should let all workers notified by `_worker_up_cond` get
# this error.
try:
raise self._error_from_recovery
finally:
self._error_from_recovery = None
logging.info("Worker %s has been recovered.", worker_device_name)
if on_recovery_fn:
logging.info("Worker %s calling on_recovery_fn", worker_device_name)
with self.wait_on_failure(
on_recovery_fn=on_recovery_fn,
on_transient_failure_fn=on_transient_failure_fn,
worker_device_name=worker_device_name):
on_recovery_fn()
def _preemption_handler(self):
"""A loop that handles preemption.
This loop waits for signal of worker preemption and upon worker preemption,
it waits until all workers are back and updates the cluster about the
restarted workers.
"""
assert self._should_preemption_thread_run
while True:
self._cluster_due_for_update_or_finish.wait()
if not self._should_preemption_thread_run:
logging.info("Stopping the failure handing thread.")
break
with self._cluster_update_lock:
try:
# TODO(haoyuzhang): support partial cluster recovery
logging.info("Cluster now being recovered.")
with metric_utils.monitored_timer("server_def_update"):
context.context().update_server_def(self._server_def)
# Cluster updated successfully, clear the update signal, and notify
# all workers that they are recovered from failure.
logging.info("Cluster successfully recovered.")
self._worker_up_cond.notify_all()
# The check for _should_preemption_thread_run is necessary since the
# `stop` may have already set _cluster_due_for_update_or_finish.
if self._should_preemption_thread_run:
self._cluster_due_for_update_or_finish.clear()
except Exception as e: # pylint: disable=broad-except
logging.info("Error occurred while updating server def: %s", e)
try:
self._validate_preemption_failure(e)
except Exception as ps_e: # pylint: disable=broad-except
logging.info("Error that occurred while updating server def is not "
"a worker failure. So set it as _error_from_recovery")
# In this case, a parameter server fails. So we raise this error to
# the caller of `wait_on_failure`.
self._error_from_recovery = ps_e
self._worker_up_cond.notify_all()
if self._should_preemption_thread_run:
self._cluster_due_for_update_or_finish.clear()
# NOTE: Since the first RPC (GetStatus) of update_server_def is
# currently blocking by default, error should only happen if:
# (1) More workers failed while waiting for the previous workers to
# come back;
# (2) Worker failed when exchanging subsequent RPCs after the first
# RPC returns.
# Consider adding backoff retry logic if we see the error logged
# too frequently.
logging.error("Cluster update failed with error: %s. Retrying...", e)
| WorkerPreemptionHandler |
python | doocs__leetcode | solution/0600-0699/0633.Sum of Square Numbers/Solution2.py | {
"start": 0,
"end": 356
} | class ____:
def judgeSquareSum(self, c: int) -> bool:
for i in range(2, int(sqrt(c)) + 1):
if c % i == 0:
exp = 0
while c % i == 0:
c //= i
exp += 1
if i % 4 == 3 and exp % 2 != 0:
return False
return c % 4 != 3
| Solution |
python | django-import-export__django-import-export | tests/core/migrations/0009_auto_20211111_0807.py | {
"start": 92,
"end": 3131
} | class ____(migrations.Migration):
dependencies = [
("core", "0008_auto_20190409_0846"),
]
operations = [
migrations.AlterField(
model_name="author",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="book",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="category",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="child",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="entry",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="parent",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="person",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="profile",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="role",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="withdefault",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="withdynamicdefault",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
migrations.AlterField(
model_name="withfloatfield",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
]
| Migration |
python | gevent__gevent | src/greentest/3.9/test_smtpd.py | {
"start": 10810,
"end": 30886
} | class ____(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
self.old_debugstream = smtpd.DEBUGSTREAM
self.debug = smtpd.DEBUGSTREAM = io.StringIO()
self.server = DummyServer((socket_helper.HOST, 0), ('b', 0),
decode_data=True)
conn, addr = self.server.accept()
self.channel = smtpd.SMTPChannel(self.server, conn, addr,
decode_data=True)
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
smtpd.DEBUGSTREAM = self.old_debugstream
def write_line(self, line):
self.channel.socket.queue_recv(line)
self.channel.handle_read()
def test_broken_connect(self):
self.assertRaises(
DummyDispatcherBroken, BrokenDummyServer,
(socket_helper.HOST, 0), ('b', 0), decode_data=True)
def test_decode_data_and_enable_SMTPUTF8_raises(self):
self.assertRaises(
ValueError, smtpd.SMTPChannel,
self.server, self.channel.conn, self.channel.addr,
enable_SMTPUTF8=True, decode_data=True)
def test_server_accept(self):
self.server.handle_accept()
def test_missing_data(self):
self.write_line(b'')
self.assertEqual(self.channel.socket.last,
b'500 Error: bad syntax\r\n')
def test_EHLO(self):
self.write_line(b'EHLO example')
self.assertEqual(self.channel.socket.last, b'250 HELP\r\n')
def test_EHLO_bad_syntax(self):
self.write_line(b'EHLO')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: EHLO hostname\r\n')
def test_EHLO_duplicate(self):
self.write_line(b'EHLO example')
self.write_line(b'EHLO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_EHLO_HELO_duplicate(self):
self.write_line(b'EHLO example')
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_HELO(self):
name = smtpd.socket.getfqdn()
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
'250 {}\r\n'.format(name).encode('ascii'))
def test_HELO_EHLO_duplicate(self):
self.write_line(b'HELO example')
self.write_line(b'EHLO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_HELP(self):
self.write_line(b'HELP')
self.assertEqual(self.channel.socket.last,
b'250 Supported commands: EHLO HELO MAIL RCPT ' + \
b'DATA RSET NOOP QUIT VRFY\r\n')
def test_HELP_command(self):
self.write_line(b'HELP MAIL')
self.assertEqual(self.channel.socket.last,
b'250 Syntax: MAIL FROM: <address>\r\n')
def test_HELP_command_unknown(self):
self.write_line(b'HELP SPAM')
self.assertEqual(self.channel.socket.last,
b'501 Supported commands: EHLO HELO MAIL RCPT ' + \
b'DATA RSET NOOP QUIT VRFY\r\n')
def test_HELO_bad_syntax(self):
self.write_line(b'HELO')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: HELO hostname\r\n')
def test_HELO_duplicate(self):
self.write_line(b'HELO example')
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_HELO_parameter_rejected_when_extensions_not_enabled(self):
self.extended_smtp = False
self.write_line(b'HELO example')
self.write_line(b'MAIL from:<foo@example.com> SIZE=1234')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address>\r\n')
def test_MAIL_allows_space_after_colon(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from: <foo@example.com>')
self.assertEqual(self.channel.socket.last,
b'250 OK\r\n')
def test_extended_MAIL_allows_space_after_colon(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: <foo@example.com> size=20')
self.assertEqual(self.channel.socket.last,
b'250 OK\r\n')
def test_NOOP(self):
self.write_line(b'NOOP')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_HELO_NOOP(self):
self.write_line(b'HELO example')
self.write_line(b'NOOP')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_NOOP_bad_syntax(self):
self.write_line(b'NOOP hi')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: NOOP\r\n')
def test_QUIT(self):
self.write_line(b'QUIT')
self.assertEqual(self.channel.socket.last, b'221 Bye\r\n')
def test_HELO_QUIT(self):
self.write_line(b'HELO example')
self.write_line(b'QUIT')
self.assertEqual(self.channel.socket.last, b'221 Bye\r\n')
def test_QUIT_arg_ignored(self):
self.write_line(b'QUIT bye bye')
self.assertEqual(self.channel.socket.last, b'221 Bye\r\n')
def test_bad_state(self):
self.channel.smtp_state = 'BAD STATE'
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
b'451 Internal confusion\r\n')
def test_command_too_long(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from: ' +
b'a' * self.channel.command_size_limit +
b'@example')
self.assertEqual(self.channel.socket.last,
b'500 Error: line too long\r\n')
def test_MAIL_command_limit_extended_with_SIZE(self):
self.write_line(b'EHLO example')
fill_len = self.channel.command_size_limit - len('MAIL from:<@example>')
self.write_line(b'MAIL from:<' +
b'a' * fill_len +
b'@example> SIZE=1234')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'MAIL from:<' +
b'a' * (fill_len + 26) +
b'@example> SIZE=1234')
self.assertEqual(self.channel.socket.last,
b'500 Error: line too long\r\n')
def test_MAIL_command_rejects_SMTPUTF8_by_default(self):
self.write_line(b'EHLO example')
self.write_line(
b'MAIL from: <naive@example.com> BODY=8BITMIME SMTPUTF8')
self.assertEqual(self.channel.socket.last[0:1], b'5')
def test_data_longer_than_default_data_size_limit(self):
# Hack the default so we don't have to generate so much data.
self.channel.data_size_limit = 1048
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'A' * self.channel.data_size_limit +
b'A\r\n.')
self.assertEqual(self.channel.socket.last,
b'552 Error: Too much mail data\r\n')
def test_MAIL_size_parameter(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> SIZE=512')
self.assertEqual(self.channel.socket.last,
b'250 OK\r\n')
def test_MAIL_invalid_size_parameter(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> SIZE=invalid')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address> [SP <mail-parameters>]\r\n')
def test_MAIL_RCPT_unknown_parameters(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> ham=green')
self.assertEqual(self.channel.socket.last,
b'555 MAIL FROM parameters not recognized or not implemented\r\n')
self.write_line(b'MAIL FROM:<eggs@example>')
self.write_line(b'RCPT TO:<eggs@example> ham=green')
self.assertEqual(self.channel.socket.last,
b'555 RCPT TO parameters not recognized or not implemented\r\n')
def test_MAIL_size_parameter_larger_than_default_data_size_limit(self):
self.channel.data_size_limit = 1048
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> SIZE=2096')
self.assertEqual(self.channel.socket.last,
b'552 Error: message size exceeds fixed maximum message size\r\n')
def test_need_MAIL(self):
self.write_line(b'HELO example')
self.write_line(b'RCPT to:spam@example')
self.assertEqual(self.channel.socket.last,
b'503 Error: need MAIL command\r\n')
def test_MAIL_syntax_HELO(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address>\r\n')
def test_MAIL_syntax_EHLO(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address> [SP <mail-parameters>]\r\n')
def test_MAIL_missing_address(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from:')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address>\r\n')
def test_MAIL_chevrons(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from:<eggs@example>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_MAIL_empty_chevrons(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from:<>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_MAIL_quoted_localpart(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: <"Fred Blogs"@example.com>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_MAIL_quoted_localpart_no_angles(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: "Fred Blogs"@example.com')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_MAIL_quoted_localpart_with_size(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: <"Fred Blogs"@example.com> SIZE=1000')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_MAIL_quoted_localpart_with_size_no_angles(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: "Fred Blogs"@example.com SIZE=1000')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_nested_MAIL(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from:eggs@example')
self.write_line(b'MAIL from:spam@example')
self.assertEqual(self.channel.socket.last,
b'503 Error: nested MAIL command\r\n')
def test_VRFY(self):
self.write_line(b'VRFY eggs@example')
self.assertEqual(self.channel.socket.last,
b'252 Cannot VRFY user, but will accept message and attempt ' + \
b'delivery\r\n')
def test_VRFY_syntax(self):
self.write_line(b'VRFY')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: VRFY <address>\r\n')
def test_EXPN_not_implemented(self):
self.write_line(b'EXPN')
self.assertEqual(self.channel.socket.last,
b'502 EXPN not implemented\r\n')
def test_no_HELO_MAIL(self):
self.write_line(b'MAIL from:<foo@example.com>')
self.assertEqual(self.channel.socket.last,
b'503 Error: send HELO first\r\n')
def test_need_RCPT(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last,
b'503 Error: need RCPT command\r\n')
def test_RCPT_syntax_HELO(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From: eggs@example')
self.write_line(b'RCPT to eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: RCPT TO: <address>\r\n')
def test_RCPT_syntax_EHLO(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL From: eggs@example')
self.write_line(b'RCPT to eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: RCPT TO: <address> [SP <mail-parameters>]\r\n')
def test_RCPT_lowercase_to_OK(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From: eggs@example')
self.write_line(b'RCPT to: <eggs@example>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_no_HELO_RCPT(self):
self.write_line(b'RCPT to eggs@example')
self.assertEqual(self.channel.socket.last,
b'503 Error: send HELO first\r\n')
def test_data_dialog(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'RCPT To:spam@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last,
b'354 End data with <CR><LF>.<CR><LF>\r\n')
self.write_line(b'data\r\nmore\r\n.')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.server.messages,
[(('peer-address', 'peer-port'),
'eggs@example',
['spam@example'],
'data\nmore')])
def test_DATA_syntax(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA spam')
self.assertEqual(self.channel.socket.last, b'501 Syntax: DATA\r\n')
def test_no_HELO_DATA(self):
self.write_line(b'DATA spam')
self.assertEqual(self.channel.socket.last,
b'503 Error: send HELO first\r\n')
def test_data_transparency_section_4_5_2(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'..\r\n.\r\n')
self.assertEqual(self.channel.received_data, '.')
def test_multiple_RCPT(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'RCPT To:ham@example')
self.write_line(b'DATA')
self.write_line(b'data\r\n.')
self.assertEqual(self.server.messages,
[(('peer-address', 'peer-port'),
'eggs@example',
['spam@example','ham@example'],
'data')])
def test_manual_status(self):
# checks that the Channel is able to return a custom status message
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'return status\r\n.')
self.assertEqual(self.channel.socket.last, b'250 Okish\r\n')
def test_RSET(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'RSET')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'MAIL From:foo@example')
self.write_line(b'RCPT To:eggs@example')
self.write_line(b'DATA')
self.write_line(b'data\r\n.')
self.assertEqual(self.server.messages,
[(('peer-address', 'peer-port'),
'foo@example',
['eggs@example'],
'data')])
def test_HELO_RSET(self):
self.write_line(b'HELO example')
self.write_line(b'RSET')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_RSET_syntax(self):
self.write_line(b'RSET hi')
self.assertEqual(self.channel.socket.last, b'501 Syntax: RSET\r\n')
def test_unknown_command(self):
self.write_line(b'UNKNOWN_CMD')
self.assertEqual(self.channel.socket.last,
b'500 Error: command "UNKNOWN_CMD" not ' + \
b'recognized\r\n')
def test_attribute_deprecations(self):
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__server
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__server = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__line
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__line = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__state
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__state = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__greeting
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__greeting = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__mailfrom
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__mailfrom = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__rcpttos
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__rcpttos = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__data
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__data = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__fqdn
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__fqdn = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__peer
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__peer = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__conn
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__conn = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__addr
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__addr = 'spam'
@unittest.skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled")
| SMTPDChannelTest |
python | django__django | tests/gis_tests/geoapp/feeds.py | {
"start": 859,
"end": 929
} | class ____(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
| TestGeoAtom1 |
python | django__django | tests/template_tests/filter_tests/test_urlencode.py | {
"start": 629,
"end": 866
} | class ____(SimpleTestCase):
def test_urlencode(self):
self.assertEqual(urlencode("fran\xe7ois & jill"), "fran%C3%A7ois%20%26%20jill")
def test_non_string_input(self):
self.assertEqual(urlencode(1), "1")
| FunctionTests |
python | huggingface__transformers | src/transformers/models/jetmoe/modular_jetmoe.py | {
"start": 18156,
"end": 21231
} | class ____(MixtralModel):
def __init__(self, config: JetMoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[JetMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self._attn_implementation = config._attn_implementation
self.norm = JetMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
| JetMoeModel |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 23046,
"end": 24319
} | class ____:
"""Adaptor to the old compiler spec interface. Exposes just a few attributes"""
def __init__(self, spec):
self.spec = spec
@property
def name(self):
return self.spec.name
@property
def version(self):
return self.spec.version
@property
def versions(self):
return self.spec.versions
@property
def display_str(self):
"""Equivalent to ``{compiler.name}{@compiler.version}`` for Specs, without extra
``@=`` for readability."""
if self.versions != vn.any_version:
return self.spec.format("{name}{@version}")
return self.spec.format("{name}")
def __lt__(self, other):
if not isinstance(other, CompilerSpec):
return self.spec < other
return self.spec < other.spec
def __eq__(self, other):
if not isinstance(other, CompilerSpec):
return self.spec == other
return self.spec == other.spec
def __hash__(self):
return hash(self.spec)
def __str__(self):
return str(self.spec)
def _cmp_iter(self):
return self.spec._cmp_iter()
def __bool__(self):
if self.spec == Spec():
return False
return bool(self.spec)
| CompilerSpec |
python | catalyst-team__catalyst | catalyst/callbacks/metrics/functional_metric.py | {
"start": 192,
"end": 1705
} | class ____(FunctionalBatchMetricCallback):
"""
Args:
input_key: input key to use for metric calculation, specifies our `y_pred`
target_key: output key to use for metric calculation, specifies our `y_true`
metric_fn: metric function, that get outputs, targets
and return score as torch.Tensor
metric_key: key to store computed metric in ``runner.batch_metrics`` dictionary
compute_on_call: Computes and returns metric value during metric call.
Used for per-batch logging. default: True
log_on_batch: boolean flag to log computed metrics every batch
prefix: metric prefix
suffix: metric suffix
"""
def __init__(
self,
input_key: Union[str, Iterable[str], Dict[str, str]],
target_key: Union[str, Iterable[str], Dict[str, str]],
metric_fn: Callable,
metric_key: str,
compute_on_call: bool = True,
log_on_batch: bool = True,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=FunctionalBatchMetric(
metric_fn=metric_fn,
metric_key=metric_key,
compute_on_call=compute_on_call,
prefix=prefix,
suffix=suffix,
),
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
__all__ = ["FunctionalMetricCallback"]
| FunctionalMetricCallback |
python | huggingface__transformers | src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py | {
"start": 3897,
"end": 4833
} | class ____(DepthAnythingFeatureFusionStage):
def forward(self, hidden_states, size=None, prompt_depth=None):
# reversing the hidden_states, we start from the last
hidden_states = hidden_states[::-1]
fused_hidden_states = []
fused_hidden_state = None
for idx, (hidden_state, layer) in enumerate(zip(hidden_states, self.layers)):
size = hidden_states[idx + 1].shape[2:] if idx != (len(hidden_states) - 1) else None
if fused_hidden_state is None:
# first layer only uses the last hidden_state
fused_hidden_state = layer(hidden_state, size=size, prompt_depth=prompt_depth)
else:
fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size, prompt_depth=prompt_depth)
fused_hidden_states.append(fused_hidden_state)
return fused_hidden_states
| PromptDepthAnythingFeatureFusionStage |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_coding_agents.py | {
"start": 2470,
"end": 6726
} | class ____(APITestCase):
"""Base test class with common setup for coding agent endpoint tests."""
endpoint = "sentry-api-0-organization-coding-agents"
def setUp(self):
super().setUp()
self.login_as(self.user)
self._setup_mock_integration()
def _setup_mock_integration(self):
"""Set up mock integration and related objects for testing."""
# Mock the coding agent providers to include our mock provider
self.mock_provider = MockCodingAgentProvider()
# Create a GitHub integration to use as our test integration
self.integration = self.create_integration(
organization=self.organization,
provider="github",
name="GitHub",
external_id="github:123",
metadata={
"api_key": "test_api_key_123",
"domain_name": "github.com",
},
)
# Create proper RPC objects using serialization
self.rpc_integration = serialize_integration(self.integration)
# Get the organization integration and serialize it using the integration service
from sentry.integrations.services.integration import integration_service
org_integration = integration_service.get_organization_integration(
integration_id=self.integration.id,
organization_id=self.organization.id,
)
self.rpc_org_integration = org_integration
# Create mock installation
self.mock_installation = MockCodingAgentInstallation(self.integration, self.organization.id)
def _create_mock_rpc_integration(self):
"""Create a mock RPC integration for testing."""
mock_rpc_integration = MagicMock()
mock_rpc_integration.id = self.integration.id
mock_rpc_integration.name = self.integration.name
mock_rpc_integration.provider = "github"
mock_rpc_integration.metadata = self.integration.metadata
mock_rpc_integration.get_installation = MagicMock(return_value=self.mock_installation)
return mock_rpc_integration
def _create_mock_autofix_state(self, repos=None):
"""Create a mock autofix state for testing."""
if repos is None:
repos = [
SeerRepoDefinition(
organization_id=self.organization.id,
integration_id=str(self.integration.id),
owner="test",
name="repo",
external_id="123",
provider="github",
)
]
return AutofixState.validate(
{
"run_id": 123,
"updated_at": datetime.now(UTC),
"status": AutofixStatus.PROCESSING,
"request": {
"organization_id": self.organization.id,
"project_id": self.project.id,
"repos": repos,
"issue": {"id": 123, "title": "Test Issue"},
},
"steps": [
{
"key": "solution",
"solution": [
{"relevant_code_file": {"repo_name": "owner1/repo1"}},
{"relevant_code_file": {"repo_name": "owner2/repo2"}},
],
}
],
}
)
def mock_integration_service_calls(self, integrations=None):
"""Helper to mock integration service calls for GET endpoint."""
from unittest.mock import patch
from sentry.integrations.services.integration import integration_service
integrations = integrations or []
@contextlib.contextmanager
def mock_context():
with (
patch.object(
integration_service,
"get_integrations",
return_value=integrations,
),
patch(
"sentry.seer.autofix.coding_agent.get_coding_agent_providers",
return_value=["test_provider"],
),
):
yield
return mock_context()
| BaseOrganizationCodingAgentsTest |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 2899,
"end": 5252
} | class ____(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you
lack data for these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
samples, timesteps, features = 32, 10, 8
inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
inputs[:, 3, :] = 0.
inputs[:, 5, :] = 0.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Masking(mask_value=0.,
input_shape=(timesteps, features)))
model.add(tf.keras.layers.LSTM(32))
output = model(inputs)
# The time step 3 and 5 will be skipped from LSTM calculation.
```
See [the masking and padding guide](
https://www.tensorflow.org/guide/keras/masking_and_padding)
for more details.
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
self._compute_output_and_mask_jointly = True
def compute_mask(self, inputs, mask=None):
return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = K.any(
math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)
# Compute the mask and outputs simultaneously.
outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1) # pylint: disable=protected-access
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Masking |
python | sympy__sympy | sympy/polys/domains/gmpyrationalfield.py | {
"start": 353,
"end": 3178
} | class ____(RationalField):
"""Rational field based on GMPY's ``mpq`` type.
This will be the implementation of :ref:`QQ` if ``gmpy`` or ``gmpy2`` is
installed. Elements will be of type ``gmpy.mpq``.
"""
dtype = GMPYRational
zero = dtype(0)
one = dtype(1)
tp = type(one)
alias = 'QQ_gmpy'
def __init__(self):
pass
def get_ring(self):
"""Returns ring associated with ``self``. """
from sympy.polys.domains import GMPYIntegerRing
return GMPYIntegerRing()
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyRational(int(gmpy_numer(a)),
int(gmpy_denom(a)))
def from_sympy(self, a):
"""Convert SymPy's Integer to ``dtype``. """
if a.is_Rational:
return GMPYRational(a.p, a.q)
elif a.is_Float:
from sympy.polys.domains import RR
return GMPYRational(*map(int, RR.to_rational(a)))
else:
raise CoercionFailed("expected ``Rational`` object, got %s" % a)
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return GMPYRational(a)
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return GMPYRational(a.numerator, a.denominator)
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return GMPYRational(a)
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return a
def from_GaussianRationalField(K1, a, K0):
"""Convert a ``GaussianElement`` object to ``dtype``. """
if a.y == 0:
return GMPYRational(a.x)
def from_RealField(K1, a, K0):
"""Convert a mpmath ``mpf`` object to ``dtype``. """
return GMPYRational(*map(int, K0.to_rational(a)))
def exquo(self, a, b):
"""Exact quotient of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b)
def quo(self, a, b):
"""Quotient of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b)
def rem(self, a, b):
"""Remainder of ``a`` and ``b``, implies nothing. """
return self.zero
def div(self, a, b):
"""Division of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b), self.zero
def numer(self, a):
"""Returns numerator of ``a``. """
return a.numerator
def denom(self, a):
"""Returns denominator of ``a``. """
return a.denominator
def factorial(self, a):
"""Returns factorial of ``a``. """
return GMPYRational(gmpy_factorial(int(a)))
| GMPYRationalField |
python | kamyu104__LeetCode-Solutions | Python/apply-operations-on-array-to-maximize-sum-of-squares.py | {
"start": 90,
"end": 553
} | class ____(object):
def maxSum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9+7
l = max(nums).bit_length()
cnt = [0]*l
for i in xrange(l):
for x in nums:
if x&(1<<i):
cnt[i] += 1
return reduce(lambda x, y: (x+y)%MOD, (sum(1<<i for i in xrange(l) if cnt[i] >= j)**2 for j in xrange(1, k+1)))
| Solution |
python | google__pytype | pytype/tests/test_quick2.py | {
"start": 233,
"end": 3939
} | class ____(test_base.BaseTest):
"""Tests for --quick."""
@classmethod
def setUpClass(cls):
super().setUpClass()
for method in ("Check", "CheckWithErrors", "Infer", "InferWithErrors"):
setattr(cls, method, make_quick(getattr(cls, method)))
def test_multiple_returns(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def add(x: int, y: int) -> int: ...
def add(x: int, y: float) -> float: ...
""",
)
self.Check(
"""
import foo
def f1():
f2()
def f2() -> int:
return foo.add(42, f3())
def f3():
return 42
""",
pythonpath=[d.path],
)
def test_multiple_returns_container(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Tuple
def concat(x: int, y: int) -> Tuple[int, int]: ...
def concat(x: int, y: float) -> Tuple[int, float]: ...
""",
)
self.Check(
"""
from typing import Tuple
import foo
def f1():
f2()
def f2() -> Tuple[int, int]:
return foo.concat(42, f3())
def f3():
return 42
""",
pythonpath=[d.path],
)
def test_noreturn(self):
self.Check("""
from typing import NoReturn
class A:
pass
class B:
def _raise_notimplemented(self) -> NoReturn:
raise NotImplementedError()
def f(self, x):
if __random__:
outputs = 42
else:
self._raise_notimplemented()
return outputs
def g(self):
outputs = self.f(A())
""")
def test_use_return_annotation(self):
self.Check("""
class Foo:
def __init__(self):
self.x = 3
class Bar:
def __init__(self):
self.f()
def f(self):
assert_type(self.g().x, int)
def g(self) -> Foo:
return Foo()
""")
def test_use_return_annotation_with_typevar(self):
self.Check("""
from typing import List, TypeVar
T = TypeVar('T')
class Foo:
def __init__(self):
x = self.f()
assert_type(x, List[int])
def f(self):
return self.g(0)
def g(self, x: T) -> List[T]:
return [x]
""")
def test_use_return_annotation_on_new(self):
self.Check("""
class Foo:
def __new__(cls) -> "Foo":
self = cls()
self.x = __any_object__
return self
def __init__(self):
self.y = 0
def f():
foo = Foo()
assert_type(foo.x, "Any")
assert_type(foo.y, "int")
""")
def test_async(self):
self.Check("""
async def f1() -> None:
await f2()
async def f2() -> None:
await f3()
async def f3() -> None:
pass
""")
def test_typevar_return(self):
self.Check("""
from typing import Sequence, TypeVar
class TestClass(int):
def __init__(self):
pass
_T = TypeVar('_T', bound=int)
def transform(t: _T) -> _T:
return t
def last_after_transform(t: Sequence[TestClass]) -> TestClass:
arr = [transform(val) for val in t]
return arr.pop(0)
""")
def test_type_of_typevar(self):
self.Check("""
from typing import Type, TypeVar
T = TypeVar('T', str, int)
def f(x: Type[T]) -> T:
return x()
def g(x: Type[T]) -> T:
return f(x)
def h():
return g(int)
""")
if __name__ == "__main__":
test_base.main()
| QuickTest |
python | huggingface__transformers | tests/models/perceiver/test_modeling_perceiver.py | {
"start": 2162,
"end": 11622
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
num_channels=3,
image_size=32,
train_size=[20, 20],
num_frames=5,
audio_samples_per_frame=200,
samples_per_patch=20,
nchunks=20,
num_latents=10,
d_latents=20,
d_model=64,
num_blocks=1,
num_self_attends_per_block=2,
num_self_attention_heads=1,
num_cross_attention_heads=1,
self_attention_widening_factor=4,
cross_attention_widening_factor=4,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_act="gelu",
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
max_position_embeddings=7,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.num_channels = num_channels
self.image_size = image_size
self.train_size = train_size
self.num_frames = num_frames
self.audio_samples_per_frame = audio_samples_per_frame
self.samples_per_patch = samples_per_patch
self.nchunks = nchunks
self.num_latents = num_latents
self.d_latents = d_latents
self.d_model = d_model
self.num_blocks = num_blocks
self.num_self_attends_per_block = num_self_attends_per_block
self.num_self_attention_heads = num_self_attention_heads
self.num_cross_attention_heads = num_cross_attention_heads
self.self_attention_widening_factor = self_attention_widening_factor
self.cross_attention_widening_factor = cross_attention_widening_factor
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_act = hidden_act
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
# set subsampling for multimodal model (take first chunk)
image_chunk_size = np.prod((self.num_frames, self.image_size, self.image_size)) // self.nchunks
audio_chunk_size = self.num_frames * self.audio_samples_per_frame // self.samples_per_patch // self.nchunks
self.subsampling = {
"image": torch.arange(0, image_chunk_size),
"audio": torch.arange(0, audio_chunk_size),
"label": None,
}
def prepare_config_and_inputs(self, model_class=None):
config = self.get_config()
input_mask = None
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.num_labels)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
if model_class is None or model_class.__name__ == "PerceiverModel":
inputs = floats_tensor([self.batch_size, self.seq_length, config.d_model], scale=1.0)
return config, inputs, input_mask, sequence_labels, token_labels
elif model_class.__name__ in ["PerceiverForMaskedLM", "PerceiverForSequenceClassification"]:
inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# input mask is only relevant for text inputs
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
elif model_class.__name__ == "PerceiverForImageClassificationLearned":
inputs = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
elif model_class.__name__ == "PerceiverForImageClassificationFourier":
inputs = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
elif model_class.__name__ == "PerceiverForImageClassificationConvProcessing":
inputs = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
elif model_class.__name__ == "PerceiverForOpticalFlow":
inputs = floats_tensor([self.batch_size, 2, 27, self.train_size[0], self.train_size[1]])
elif model_class.__name__ == "PerceiverForMultimodalAutoencoding":
images = torch.randn(
(self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size),
device=torch_device,
)
audio = torch.randn(
(self.batch_size, self.num_frames * self.audio_samples_per_frame, 1), device=torch_device
)
inputs = {
"image": images,
"audio": audio,
"label": torch.zeros((self.batch_size, self.num_labels), device=torch_device),
}
else:
raise ValueError(f"Model class {model_class} not supported")
return config, inputs, input_mask, sequence_labels, token_labels
def get_config(self):
return PerceiverConfig(
num_latents=self.num_latents,
d_latents=self.d_latents,
d_model=self.d_model,
qk_channels=self.d_latents,
v_channels=self.d_latents,
num_blocks=self.num_blocks,
num_self_attends_per_block=self.num_self_attends_per_block,
num_self_attention_heads=self.num_self_attention_heads,
num_cross_attention_heads=self.num_cross_attention_heads,
self_attention_widening_factor=self.self_attention_widening_factor,
cross_attention_widening_factor=self.cross_attention_widening_factor,
vocab_size=self.vocab_size,
hidden_act=self.hidden_act,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
max_position_embeddings=self.max_position_embeddings,
image_size=self.image_size,
train_size=self.train_size,
num_frames=self.num_frames,
audio_samples_per_frame=self.audio_samples_per_frame,
samples_per_patch=self.samples_per_patch,
num_labels=self.num_labels,
output_num_channels=32,
_label_trainable_num_channels=16,
)
def get_pipeline_config(self):
config = self.get_config()
# Byte level vocab
config.vocab_size = 261
config.max_position_embeddings = 40
return config
def create_and_check_for_masked_lm(self, config, inputs, input_mask, sequence_labels, token_labels):
model = PerceiverForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(inputs, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(self, config, inputs, input_mask, sequence_labels, token_labels):
model = PerceiverForSequenceClassification(config=config)
model.to(torch_device)
model.eval()
result = model(inputs, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_image_classification_learned(
self, config, inputs, input_mask, sequence_labels, token_labels
):
model = PerceiverForImageClassificationLearned(config=config)
model.to(torch_device)
model.eval()
result = model(inputs, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_image_classification_fourier(
self, config, inputs, input_mask, sequence_labels, token_labels
):
model = PerceiverForImageClassificationFourier(config=config)
model.to(torch_device)
model.eval()
result = model(inputs, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_image_classification_conv(
self, config, inputs, input_mask, sequence_labels, token_labels
):
model = PerceiverForImageClassificationConvProcessing(config=config)
model.to(torch_device)
model.eval()
result = model(inputs, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, inputs, input_mask, sequence_labels, token_labels = config_and_inputs
inputs_dict = {"inputs": inputs, "attention_mask": input_mask}
return config, inputs_dict
def prepare_config_and_inputs_for_model_class(self, model_class):
config_and_inputs = self.prepare_config_and_inputs(model_class)
config, inputs, input_mask, sequence_labels, token_labels = config_and_inputs
inputs_dict = {"inputs": inputs, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| PerceiverModelTester |
python | google__pytype | pytype/rewrite/flow/frame_base_test.py | {
"start": 551,
"end": 722
} | class ____(opcodes.Opcode):
_FLAGS = opcodes.NO_NEXT
def __init__(self, index):
super().__init__(index=index, line=0)
# pylint: enable=invalid-name
| FAKE_OP_NO_NEXT |
python | tensorflow__tensorflow | tensorflow/python/data/ops/dataset_ops.py | {
"start": 5677,
"end": 155319
} | class ____(
collections_abc.Iterable,
tracking_base.Trackable,
composite_tensor.CompositeTensor,
data_types.DatasetV2,
metaclass=abc.ABCMeta):
"""Represents a potentially large set of elements.
The `tf.data.Dataset` API supports writing descriptive and efficient input
pipelines. `Dataset` usage follows a common pattern:
1. Create a source dataset from your input data.
2. Apply dataset transformations to preprocess the data.
3. Iterate over the dataset and process the elements.
Iteration happens in a streaming fashion, so the full dataset does not need to
fit into memory.
Source Datasets:
The simplest way to create a dataset is to create it from a python `list`:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset:
... print(element)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)
tf.Tensor(3, shape=(), dtype=int32)
To process lines from files, use `tf.data.TextLineDataset`:
>>> dataset = tf.data.TextLineDataset(["file1.txt", "file2.txt"])
To process records written in the `TFRecord` format, use `TFRecordDataset`:
>>> dataset = tf.data.TFRecordDataset(["file1.tfrecords", "file2.tfrecords"])
To create a dataset of all files matching a pattern, use
`tf.data.Dataset.list_files`:
```python
dataset = tf.data.Dataset.list_files("/path/*.txt")
```
See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator`
for more ways to create datasets.
Transformations:
Once you have a dataset, you can apply transformations to prepare the data for
your model:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.map(lambda x: x*2)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[2, 4, 6]
Common Terms:
**Element**: A single output from calling `next()` on a dataset iterator.
Elements may be nested structures containing multiple components. For
example, the element `(1, (3, "apple"))` has one tuple nested in another
tuple. The components are `1`, `3`, and `"apple"`.
**Component**: The leaf in the nested structure of an element.
Supported types:
Elements can be nested structures of tuples, named tuples, and dictionaries.
Note that Python lists are *not* treated as nested structures of components.
Instead, lists are converted to tensors and treated as components. For
example, the element `(1, [1, 2, 3])` has only two components; the tensor `1`
and the tensor `[1, 2, 3]`. Element components can be of any type
representable by `tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`,
`tf.sparse.SparseTensor`, `tf.RaggedTensor`, and `tf.TensorArray`.
```python
a = 1 # Integer element
b = 2.0 # Float element
c = (1, 2) # Tuple element with 2 components
d = {"a": (2, 2), "b": 3} # Dict element with 3 components
Point = collections.namedtuple("Point", ["x", "y"])
e = Point(1, 2) # Named tuple
f = tf.data.Dataset.range(10) # Dataset element
```
For more information,
read [this guide](https://www.tensorflow.org/guide/data).
"""
def __init__(self, variant_tensor):
"""Creates a DatasetV2 object.
This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not
take anything in its constructor whereas in the DatasetV2, we expect
subclasses to create a variant_tensor and pass it in to the super() call.
Args:
variant_tensor: A DT_VARIANT tensor that represents the dataset.
"""
self._variant_tensor_attr = variant_tensor
self._graph_attr = ops.get_default_graph()
# Initialize the options for this dataset and its inputs.
self._options_attr = options_lib.Options()
for input_dataset in self._inputs():
input_options = None
if isinstance(input_dataset, data_types.DatasetV1):
# If the V1 dataset does not have the `_dataset` attribute, we assume it
# is a dataset source and hence does not have options. Otherwise, we
# grab the options of `_dataset` object
if hasattr(input_dataset, "_dataset"):
if not isinstance(input_dataset._dataset, data_types.DatasetV2):
raise TypeError(
f"Each input of dataset {type(self)} should be a subclass of "
f"`tf.data.Dataset` but encountered "
f"{type(input_dataset._dataset)}.")
input_options = input_dataset._dataset._options_attr
elif isinstance(input_dataset, data_types.DatasetV2):
input_options = input_dataset._options_attr
else:
raise TypeError(
f"Each input of dataset {type(self)} should be a subclass of "
f"`tf.data.Dataset` but encountered {type(input_dataset)}.")
if input_options is not None:
self._options_attr = self._options_attr.merge(input_options)
self._options_attr._set_mutable(False) # pylint: disable=protected-access
@property
def _variant_tensor(self):
return self._variant_tensor_attr
@_variant_tensor.setter
def _variant_tensor(self, _):
raise ValueError("The `_variant_tensor` property cannot be modified.")
@deprecation.deprecated_args(None, "Use external_state_policy instead",
"allow_stateful")
def _as_serialized_graph(
self,
allow_stateful=None,
strip_device_assignment=None,
external_state_policy=options_lib.ExternalStatePolicy.WARN):
"""Produces serialized graph representation of the dataset.
Args:
allow_stateful: If true, we allow stateful ops to be present in the graph
def. In that case, the state in these ops would be thrown away.
strip_device_assignment: If true, non-local (i.e. job and task) device
assignment is stripped from ops in the serialized graph.
external_state_policy: The ExternalStatePolicy enum that determines how we
handle input pipelines that depend on external state. By default, its
set to WARN.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph.
"""
if external_state_policy:
policy = external_state_policy.value
return gen_dataset_ops.dataset_to_graph_v2(
self._variant_tensor,
external_state_policy=policy,
strip_device_assignment=strip_device_assignment)
if strip_device_assignment:
return gen_dataset_ops.dataset_to_graph(
self._variant_tensor,
allow_stateful=allow_stateful,
strip_device_assignment=strip_device_assignment)
return gen_dataset_ops.dataset_to_graph(
self._variant_tensor, allow_stateful=allow_stateful)
def _maybe_track_assets(self, graph_def):
"""Finds and tracks nodes in `graph_def` that refer to asset files.
Args:
graph_def: Serialized graph representation of this dataset.
Returns:
A dictionary mapping the node name of an asset constant to a tracked
`asset.Asset` object.
"""
asset_tracker = {}
for node in graph_def.node:
if node.name.startswith("FileIdentity"):
asset_tracker[node.input[0]] = None
if not asset_tracker:
return {}
for node in graph_def.node:
if node.name in asset_tracker:
tensor_proto = node.attr["value"].tensor
with context.eager_mode(), ops.device("CPU"):
node_value = gen_parsing_ops.parse_tensor(
tensor_proto.SerializeToString(), dtypes.string).numpy()
asset_tracker[node.name] = ([
self._track_trackable(asset.Asset(n),
name=node.name + "_" + str(i), overwrite=True)
for i, n in enumerate(node_value)
])
return asset_tracker
def _trackable_children(self,
save_type=tracking_base.SaveType.CHECKPOINT,
**kwargs):
if save_type != tracking_base.SaveType.SAVEDMODEL:
return {}
# _trace_variant_creation only works when executing eagerly, so we don't
# want to run it in the object initialization.
@def_function.function(input_signature=[], autograph=False)
def _creator():
resource = self._trace_variant_creation()() # pylint: disable=protected-access
return resource
_creator.get_concrete_function() # Trigger asset tracking
children = super(DatasetV2, self)._trackable_children(save_type, **kwargs)
children["_variant_tracker"] = _VariantTracker(self._variant_tensor,
_creator)
return children
def _trace_variant_creation(self):
"""Traces a function which outputs a variant `tf.Tensor` for this dataset.
Note that creating this function involves evaluating an op, and is currently
only supported when executing eagerly.
Returns:
A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
"""
variant = self._variant_tensor
if not isinstance(variant, ops.EagerTensor):
raise NotImplementedError(
"Constructing a tf.function that reproduces a given dataset is only "
"supported for datasets created eagerly. Please file a feature "
"request if this is important to you.")
with context.eager_mode(), ops.device("CPU"):
# pylint: disable=protected-access
graph_def = graph_pb2.GraphDef().FromString(
self._as_serialized_graph(external_state_policy=options_lib
.ExternalStatePolicy.FAIL).numpy())
output_node_names = []
for node in graph_def.node:
if node.op == "_Retval":
output_node_names = node.input
if len(output_node_names) != 1:
raise AssertionError(
f"Dataset graph is expected to only have one return value but found "
f"{len(output_node_names)} return values: {output_node_names}.")
output_node_name = output_node_names[0]
file_path_nodes = {}
# When building a tf.function, track files as `saved_model.Asset`s.
if ops.get_default_graph().building_function:
asset_tracker = self._maybe_track_assets(graph_def)
for key in asset_tracker:
assets_list = [
array_ops.expand_dims(asset.asset_path, axis=0)
for asset in asset_tracker[key]
]
file_path_nodes[key] = array_ops.concat(assets_list, axis=0)
# Add functions used in this Dataset to the function's graph, since they
# need to follow it around (and for example be added to a SavedModel which
# references the dataset).
variant_function = wrap_function.function_from_graph_def(
graph_def,
inputs=[],
outputs=output_node_name + ":0",
captures=file_path_nodes)
for used_function in self._functions():
used_function.function.add_to_graph(variant_function.graph)
return variant_function
@abc.abstractmethod
def _inputs(self):
"""Returns a list of the input datasets of the dataset."""
raise NotImplementedError(f"{type(self)}._inputs()")
@property
def _graph(self):
return self._graph_attr
@_graph.setter
def _graph(self, _):
raise ValueError("The `_graph` property cannot be modified.")
# TODO(jsimsa): Change this to be the transitive closure of functions used
# by this dataset and its inputs.
def _functions(self) -> list[StructuredFunctionWrapper]:
"""Returns a list of functions associated with this dataset.
Returns:
A list of `StructuredFunctionWrapper` objects.
"""
return []
def _options(self):
"""Returns the options tensor for this dataset."""
# pylint: disable=protected-access
return gen_dataset_ops.get_options(self._variant_tensor)
@classmethod
def _options_tensor_to_options(cls, serialized_options):
"""Converts options tensor to tf.data.Options object."""
options = options_lib.Options()
if tensor_util.constant_value(serialized_options) is not None:
pb = dataset_options_pb2.Options.FromString(tensor_util.constant_value(
serialized_options))
options._from_proto(pb) # pylint: disable=protected-access
return options
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
if context.executing_eagerly():
options = self._options_tensor_to_options(self._options())
options._set_mutable(False) # pylint: disable=protected-access
return options
warnings.warn("To make it possible to preserve tf.data options across "
"serialization boundaries, their implementation has moved to "
"be part of the TensorFlow graph. As a consequence, the "
"options value is in general no longer known at graph "
"construction time. Invoking this method in graph mode "
"retains the legacy behavior of the original implementation, "
"but note that the returned value might not reflect the "
"actual value of the options.")
return self._options_attr
def _apply_debug_options(self):
if debug_mode.DEBUG_MODE:
# Disable autotuning and static optimizations that could introduce
# parallelism or asynchrony.
options = options_lib.Options()
options.autotune.enabled = False
options.experimental_optimization.filter_parallelization = False
options.experimental_optimization.map_and_batch_fusion = False
options.experimental_optimization.map_parallelization = False
dataset = _OptionsDataset(self, options)
else:
dataset = self
return dataset
def __iter__(self) -> iterator_ops.OwnedIterator:
"""Creates an iterator for elements of this dataset.
The returned iterator implements the Python Iterator protocol.
Returns:
An `tf.data.Iterator` for the elements of this dataset.
Raises:
RuntimeError: If not inside of tf.function and not executing eagerly.
"""
if context.executing_eagerly() or ops.inside_function():
with ops.colocate_with(self._variant_tensor):
return iterator_ops.OwnedIterator(self)
else:
raise RuntimeError("`tf.data.Dataset` only supports Python-style "
"iteration in eager mode or within tf.function.")
def __bool__(self):
return True # Required as __len__ is defined
__nonzero__ = __bool__ # Python 2 backward compatibility
def __len__(self):
"""Returns the length of the dataset if it is known and finite.
This method requires that you are running in eager mode, and that the
length of the dataset is known and non-infinite. When the length may be
unknown or infinite, or if you are running in graph mode, use
`tf.data.Dataset.cardinality` instead.
Returns:
An integer representing the length of the dataset.
Raises:
RuntimeError: If the dataset length is unknown or infinite, or if eager
execution is not enabled.
"""
if not context.executing_eagerly():
raise TypeError("`tf.data.Dataset` only supports `len` in eager mode. "
"Use `tf.data.Dataset.cardinality()` instead.")
length = self.cardinality()
if length.numpy() == INFINITE:
raise TypeError("The dataset is infinite.")
if length.numpy() == UNKNOWN:
raise TypeError("The dataset length is unknown.")
return length
@abc.abstractproperty
def element_spec(self):
"""The type specification of an element of this dataset.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset.element_spec
TensorSpec(shape=(), dtype=tf.int32, name=None)
For more information,
read [this guide](https://www.tensorflow.org/guide/data#dataset_structure).
Returns:
A (nested) structure of `tf.TypeSpec` objects matching the structure of an
element of this dataset and specifying the type of individual components.
"""
raise NotImplementedError(f"{type(self)}.element_spec()")
def __repr__(self):
type_ = type(self._dataset if isinstance(self, DatasetV1Adapter) else self)
return f"<{type_.__name__} element_spec={self.element_spec}>"
def __debug_string__(self):
"""Returns a string showing the type of the dataset and its inputs.
This string is intended only for debugging purposes, and may change without
warning.
"""
lines = []
to_process = [(self, 0)] # Stack of (dataset, depth) pairs.
while to_process:
dataset, depth = to_process.pop()
lines.append("-"*2*depth + repr(dataset))
to_process.extend([(ds, depth+1) for ds in dataset._inputs()]) # pylint: disable=protected-access
return "\n".join(lines)
def as_numpy_iterator(self):
"""Returns an iterator which converts all elements of the dataset to numpy.
Use `as_numpy_iterator` to inspect the content of your dataset. To see
element shapes and types, print dataset elements directly instead of using
`as_numpy_iterator`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset:
... print(element)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)
tf.Tensor(3, shape=(), dtype=int32)
This method requires that you are running in eager mode and the dataset's
element_spec contains only `TensorSpec` components.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset.as_numpy_iterator():
... print(element)
1
2
3
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> [a.item() for a in dataset.as_numpy_iterator()]
[1, 2, 3]
`as_numpy_iterator()` will preserve the nested structure of dataset
elements.
>>> dataset = tf.data.Dataset.from_tensor_slices({'a': ([1, 2], [3, 4]),
... 'b': [5, 6]})
>>> list(dataset.as_numpy_iterator()) == [{'a': (1, 3), 'b': 5},
... {'a': (2, 4), 'b': 6}]
True
Returns:
An iterable over the elements of the dataset, with their tensors converted
to numpy arrays.
Raises:
TypeError: if an element contains a non-`Tensor` value.
RuntimeError: if eager execution is not enabled.
"""
if not context.executing_eagerly():
raise RuntimeError("`tf.data.Dataset.as_numpy_iterator()` is only "
"supported in eager mode.")
for component_spec in nest.flatten(self.element_spec):
if not isinstance(
component_spec,
(tensor_spec.TensorSpec, ragged_tensor.RaggedTensorSpec,
sparse_tensor_lib.SparseTensorSpec, none_tensor.NoneTensorSpec)):
raise TypeError(
f"`tf.data.Dataset.as_numpy_iterator()` is not supported for "
f"datasets that produce values of type {component_spec.value_type}")
return NumpyIterator(self)
@property
def _flat_shapes(self):
"""Returns a list `tf.TensorShapes`s for the element tensor representation.
Returns:
A list `tf.TensorShapes`s for the element tensor representation.
"""
return structure.get_flat_tensor_shapes(self.element_spec)
@property
def _flat_types(self):
"""Returns a list `tf.DType`s for the element tensor representation.
Returns:
A list `tf.DType`s for the element tensor representation.
"""
return structure.get_flat_tensor_types(self.element_spec)
@property
def _flat_structure(self):
"""Helper for setting `output_shapes` and `output_types` attrs of an op.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element. This helper
function generates these attrs as a keyword argument dictionary, allowing
`Dataset._variant_tensor` implementations to pass `**self._flat_structure`
to the op constructor.
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
"""
return {
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
@property
def _metadata(self):
"""Helper for generating dataset metadata."""
metadata = dataset_metadata_pb2.Metadata()
if self._name:
metadata.name = _validate_and_encode(self._name)
return metadata
@property
def _common_args(self):
"""Helper for generating arguments that are common across most dataset ops.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element, as well as a
`metadata` argument for additional metadata such as user-defined dataset
name. This helper function generates common attributes as a keyword argument
dictionary, allowing `Dataset._variant_tensor` implementations to pass
`**self._common_args` to the op constructor.
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
"""
return {
"metadata": self._metadata.SerializeToString(),
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
@property
def _type_spec(self):
return DatasetSpec(self.element_spec)
@staticmethod
def from_tensors(tensors, name=None) -> "DatasetV2":
"""Creates a `Dataset` with a single element, comprising the given tensors.
`from_tensors` produces a dataset containing only a single element. To slice
the input tensor into multiple elements, use `from_tensor_slices` instead.
>>> dataset = tf.data.Dataset.from_tensors([1, 2, 3])
>>> list(dataset.as_numpy_iterator())
[array([1, 2, 3], dtype=int32)]
>>> dataset = tf.data.Dataset.from_tensors(([1, 2, 3], 'A'))
>>> list(dataset.as_numpy_iterator())
[(array([1, 2, 3], dtype=int32), b'A')]
>>> # You can use `from_tensors` to produce a dataset which repeats
>>> # the same example many times.
>>> example = tf.constant([1,2,3])
>>> dataset = tf.data.Dataset.from_tensors(example).repeat(2)
>>> list(dataset.as_numpy_iterator())
[array([1, 2, 3], dtype=int32), array([1, 2, 3], dtype=int32)]
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this
guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).
Args:
tensors: A dataset "element". Supported values are documented
[here](https://www.tensorflow.org/guide/data#dataset_structure).
name: (Optional.) A name for the tf.data operation.
Returns:
Dataset: A `Dataset`.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# from_tensors_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import from_tensors_op
return from_tensors_op._from_tensors(tensors, name)
# pylint: enable=g-import-not-at-top,protected-access
@staticmethod
def from_tensor_slices(tensors, name=None) -> "DatasetV2":
  """Creates a `Dataset` whose elements are slices of the given tensors.

  Each input tensor is sliced along its first dimension: that dimension
  becomes the dataset dimension, while the nested structure of `tensors`
  (tuples, dicts, ...) is preserved in every element. All component
  tensors must agree on the size of their first dimension.

  For example, slicing a 1-D tensor yields scalar elements, slicing a
  2-D tensor yields 1-D elements, and slicing a tuple of 1-D tensors
  yields tuples of scalars. Two tensors (e.g. features and labels) can
  either be passed together as one tuple, or converted separately and
  combined with `Dataset.zip`.

  Note: when `tensors` holds a NumPy array and eager execution is off,
  the values are embedded in the graph as `tf.constant` operations; for
  large data (> 1 GB) this wastes memory and can hit graph-serialization
  byte limits — see the alternative described in
  https://tensorflow.org/guide/data#consuming_numpy_arrays.

  Args:
    tensors: A dataset element, whose components have the same first
      dimension. Supported values are documented
      [here](https://www.tensorflow.org/guide/data#dataset_structure).
    name: (Optional.) A name for the tf.data operation.

  Returns:
    Dataset: A `Dataset`.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> from_tensor_slices_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import from_tensor_slices_op

  return from_tensor_slices_op._from_tensor_slices(tensors, name)
  # pylint: enable=g-import-not-at-top,protected-access
class _GeneratorState:
  """Tracks the outstanding iterators created from one Python generator.

  A single generator callable may back several live iterators at once —
  for instance when the dataset is repeated, or nested inside a parallel
  computation. This class hands out integer ids and lazily materializes
  the iterator for each id on first use.
  """

  def __init__(self, generator):
    self._generator = generator
    self._lock = threading.Lock()
    self._next_id = 0  # GUARDED_BY(self._lock)
    self._args = {}
    self._iterators = {}

  def _normalize_id(self, iterator_id):
    # Debug mode may produce iterator ids as eagerly-evaluated np.ndarray
    # values rather than Tensors; collapse them to plain scalars so they
    # are hashable dict keys.
    if isinstance(iterator_id, np.ndarray):
      return iterator_id.item()
    return iterator_id

  def get_next_id(self, *args):
    """Reserves a fresh iterator id and records the call args for it."""
    with self._lock:
      new_id = self._next_id
      self._next_id += 1
    self._args[new_id] = args
    # NOTE(mrry): Explicitly create an array of `np.int64` because implicit
    # casting in `py_func()` will create an array of `np.int32` on Windows,
    # leading to a runtime error.
    return np.array(new_id, dtype=np.int64)

  def get_iterator(self, iterator_id):
    """Returns the iterator for `iterator_id`, creating it on first use."""
    key = self._normalize_id(iterator_id)
    try:
      return self._iterators[key]
    except KeyError:
      # First request for this id: consume the stored args and build the
      # iterator from the user's generator.
      fresh_iterator = iter(self._generator(*self._args.pop(key)))
      self._iterators[key] = fresh_iterator
      return fresh_iterator

  def iterator_completed(self, iterator_id):
    """Drops the bookkeeping for an exhausted iterator."""
    del self._iterators[self._normalize_id(iterator_id)]
@staticmethod
@deprecation.deprecated_args(None, "Use output_signature instead",
                             "output_types", "output_shapes")
def from_generator(
    generator,
    output_types=None,
    output_shapes=None,
    args=None,
    output_signature=None,
    name=None,
) -> "DatasetV2":
  """Creates a `Dataset` whose elements are generated by `generator`.

  `generator` must be a callable returning an object that supports the
  `iter()` protocol (e.g. a generator function). The values it yields
  must match either the `output_signature` argument (the recommended
  calling convention, using `tf.TypeSpec` objects), or the deprecated
  `output_types` / `output_shapes` pair, in which case the outputs are
  assumed to be `tf.Tensor` objects of those types and shapes.

  Note: The current implementation uses `tf.numpy_function` and inherits
  its constraints: the dataset- and iterator-related operations must be
  placed on a device in the same process as the Python program that
  called `Dataset.from_generator()`, which in particular precludes using
  tf.data service to scale out processing. The body of `generator` is
  not serialized in a `GraphDef`, so do not use this method if you need
  to serialize your model and restore it in a different environment.

  Note: The runtime may invoke `generator` multiple times (to support
  repeating the dataset) and at any time between the call to
  `Dataset.from_generator()` and the production of the first element, so
  mutating global state from `generator` is undefined behavior — cache
  any external state before calling this method. Also keep the scope of
  `from_generator` limited to logic that cannot be expressed through
  tf.data operations; using tf.data operations inside the generator is
  an anti-pattern and may cause incremental memory growth.

  Args:
    generator: A callable object that returns an object that supports the
      `iter()` protocol. If `args` is not specified, `generator` must take
      no arguments; otherwise it must take as many arguments as there are
      values in `args`.
    output_types: (Optional.) A (nested) structure of `tf.DType` objects
      corresponding to each component of an element yielded by `generator`.
    output_shapes: (Optional.) A (nested) structure of `tf.TensorShape`
      objects corresponding to each component of an element yielded by
      `generator`.
    args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
      and passed to `generator` as NumPy-array arguments.
    output_signature: (Optional.) A (nested) structure of `tf.TypeSpec`
      objects corresponding to each component of an element yielded by
      `generator`.
    name: (Optional.) A name for the tf.data operations used by
      `from_generator`.

  Returns:
    Dataset: A `Dataset`.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> from_generator_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import from_generator_op

  return from_generator_op._from_generator(
      generator, output_types, output_shapes, args, output_signature, name)
  # pylint: enable=g-import-not-at-top,protected-access
@staticmethod
def range(*args, **kwargs) -> "DatasetV2":
  """Creates a `Dataset` of a step-separated range of values.

  Follows the semantics of Python's built-in `range`: `range(5)` yields
  `[0, 1, 2, 3, 4]`, `range(2, 5)` yields `[2, 3, 4]`, and
  `range(1, 5, 2)` yields `[1, 3]`. Empty ranges (e.g. `range(5, 1)` or
  a negative step that never reaches `stop`) produce no elements. The
  element dtype defaults to `tf.int64` and can be overridden with the
  `output_type` keyword, e.g. `range(1, 5, 2, output_type=tf.float32)`
  yields `[1.0, 3.0]`.

  Args:
    *args: follows the same semantics as python's range.
      len(args) == 1 -> start = 0, stop = args[0], step = 1.
      len(args) == 2 -> start = args[0], stop = args[1], step = 1.
      len(args) == 3 -> start = args[0], stop = args[1], step = args[2].
    **kwargs:
      - output_type: Its expected dtype. (Optional, default: `tf.int64`).
      - name: (Optional.) A name for the tf.data operation.

  Returns:
    Dataset: A `RangeDataset`.

  Raises:
    ValueError: if len(args) == 0.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> range_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import range_op

  return range_op._range(*args, **kwargs)
  # pylint: enable=g-import-not-at-top,protected-access
@staticmethod
def zip(*args, datasets=None, name=None) -> "DatasetV2":
  """Creates a `Dataset` by zipping together the given datasets.

  Semantics are similar to Python's built-in `zip()`, except that the
  inputs may be a (nested) structure of `Dataset` objects; the supported
  nesting mechanisms are documented
  [here](https://www.tensorflow.org/guide/data#dataset_structure). The
  structure of the inputs determines the structure of each element of
  the result, and the resulting dataset is as long as the *shortest*
  input dataset. Any number of datasets can be zipped together.

  Args:
    *args: Datasets or nested structures of datasets to zip together. This
      can't be set if `datasets` is set.
    datasets: A (nested) structure of datasets. This can't be set if
      `*args` is set. Exists only for backwards compatibility; prefer
      `*args`.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A new `Dataset` with the transformation applied as described above.

  Raises:
    TypeError: if no datasets are given, or if both `*args` and
      `datasets` are set.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> zip_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import zip_op

  # Exactly one of `*args` / `datasets` must be provided.
  if datasets is None and not args:
    raise TypeError("Must pass at least one dataset to `zip`.")
  if datasets is not None and args:
    raise TypeError("Both `*args` and `datasets` cannot be set.")
  if len(args) == 1:
    # A single positional argument may itself be a nested structure.
    datasets = args[0]
  elif len(args) > 1:
    datasets = args
  return zip_op._zip(datasets, name)
  # pylint: enable=g-import-not-at-top,protected-access
def concatenate(self, dataset, name=None) -> "DatasetV2":
  """Creates a `Dataset` by concatenating the given dataset with this one.

  All elements of this dataset are produced first, followed by all
  elements of `dataset`. The two datasets must have compatible element
  specs; otherwise a `TypeError` is raised reporting the differing
  types (e.g. concatenating an `int64` dataset with a `string` dataset,
  or with a zipped tuple dataset, fails).

  Args:
    dataset: `Dataset` to be concatenated.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A new `Dataset` with the transformation applied as described above.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> concatenate_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import concatenate_op

  return concatenate_op._concatenate(self, dataset, name)
  # pylint: enable=g-import-not-at-top,protected-access
@staticmethod
def counter(start=0, step=1, dtype=dtypes.int64, name=None) -> "DatasetV2":
  """Creates a `Dataset` that counts from `start` in steps of size `step`.

  Unlike `tf.data.Dataset.range`, which stops at some ending number,
  `tf.data.Dataset.counter` produces elements indefinitely — e.g.
  `start=2, step=5` yields `2, 7, 12, 17, 22, ...`, and a negative
  `step` counts downward. Each element is a scalar of dtype `dtype`.

  Args:
    start: (Optional.) The starting value for the counter. Defaults to 0.
    step: (Optional.) The step size for the counter. Defaults to 1.
    dtype: (Optional.) The data type for counter elements. Defaults to
      `tf.int64`.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A `Dataset` of scalar `dtype` elements.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> counter_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import counter_op

  return counter_op._counter(start, step, dtype, name=name)
  # pylint: enable=g-import-not-at-top,protected-access
def fingerprint(self):
  """Computes the fingerprint of this `Dataset`.

  If two datasets have the same fingerprint, they are guaranteed to
  produce identical elements, provided the upstream input files do not
  change and the pipelines are deterministic. The converse does not
  hold: datasets built from different graph constructs may produce
  identical values yet have different fingerprints.

  Returns:
    A scalar `tf.Tensor` of type `tf.uint64`.
  """
  fingerprint_tensor = gen_dataset_ops.dataset_fingerprint(
      self._variant_tensor)
  return fingerprint_tensor
def rebatch(self, batch_size, drop_remainder=False, name=None) -> "DatasetV2":
  """Creates a `Dataset` that rebatches the elements from this dataset.

  `rebatch(N)` is functionally equivalent to `unbatch().batch(N)`, but
  more efficient: it performs one copy instead of two. For example,
  `range(7).batch(4).rebatch(3)` yields batches `[0,1,2]`, `[3,4,5]`,
  `[6]`; with `drop_remainder=True` the trailing short batch is dropped.
  When `batch_size` is a list, `rebatch` cycles through it in round
  robin fashion to size successive batches, e.g.
  `range(8).batch(4).rebatch([2, 1, 1])` yields `[0,1]`, `[2]`, `[3]`,
  `[4,5]`, `[6]`, `[7]`.

  Args:
    batch_size: A `tf.int64` scalar or vector, representing the size of
      batches to produce. If this argument is a vector, these values are
      cycled through in round robin fashion.
    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
      whether the last batch should be dropped in the case it has fewer than
      `batch_size[cycle_index]` elements; the default behavior is not to
      drop the smaller batch.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A `Dataset` of scalar `dtype` elements.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> rebatch_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import rebatch_op

  return rebatch_op._rebatch(self, batch_size, drop_remainder, name=name)
  # pylint: enable=g-import-not-at-top,protected-access
def prefetch(self, buffer_size, name=None) -> "DatasetV2":
  """Creates a `Dataset` that prefetches elements from this dataset.

  Most dataset input pipelines should end with a call to `prefetch`: it
  lets later elements be prepared while the current element is being
  processed, improving latency and throughput at the cost of extra
  memory for the buffered elements.

  Note: like other `Dataset` methods, `prefetch` operates on *elements*
  of the input dataset — it has no concept of examples vs. batches.
  `examples.prefetch(2)` buffers two examples, whereas
  `examples.batch(20).prefetch(2)` buffers two batches of 20 examples
  each.

  Args:
    buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
      number of elements that will be buffered when prefetching. If the
      value `tf.data.AUTOTUNE` is used, then the buffer size is dynamically
      tuned.
    name: Optional. A name for the tf.data transformation.

  Returns:
    A new `Dataset` with the transformation applied as described above.
  """
  prefetched = prefetch_op._prefetch(  # pylint: disable=protected-access
      self, buffer_size, name=name)
  return prefetched
@staticmethod
def list_files(
    file_pattern, shuffle=None, seed=None, name=None
) -> "DatasetV2":
  """A dataset of all files matching one or more glob patterns.

  `file_pattern` should be a small number of glob patterns. If your
  filenames are already globbed, prefer
  `Dataset.from_tensor_slices(filenames)`: re-globbing every filename
  with `list_files` may perform poorly on remote storage systems.

  Note: by default the filenames are returned in a non-deterministic
  random shuffled order. Pass a `seed` or `shuffle=False` to get a
  deterministic order. For example, with files `a.txt`, `b.py`, `c.py`
  in a directory, the pattern `"/path/to/dir/*.py"` produces `b.py` and
  `c.py`.

  Args:
    file_pattern: A string, a list of strings, or a `tf.Tensor` of string
      type (scalar or vector), representing the filename glob (i.e. shell
      wildcard) pattern(s) that will be matched.
    shuffle: (Optional.) If `True`, the file names will be shuffled
      randomly. Defaults to `True`.
    seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
      random seed that will be used to create the distribution. See
      `tf.random.set_seed` for behavior.
    name: Optional. A name for the tf.data operations used by `list_files`.

  Returns:
    Dataset: A `Dataset` of strings corresponding to file names.
  """
  with ops.name_scope("list_files"):
    if shuffle is None:
      shuffle = True
    file_pattern = ops.convert_to_tensor(
        file_pattern, dtype=dtypes.string, name="file_pattern")
    matching_files = gen_io_ops.matching_files(file_pattern)

    # Fail loudly when `file_pattern` matches nothing, instead of silently
    # producing an empty dataset.
    has_matches = math_ops.greater(
        array_ops.shape(matching_files)[0], 0, name="match_not_empty")
    error_message = math_ops.add(
        "No files matched pattern: ",
        string_ops.reduce_join(file_pattern, separator=", "), name="message")
    not_empty_assert = control_flow_assert.Assert(
        has_matches, [error_message], summarize=1, name="assert_not_empty")
    with ops.control_dependencies([not_empty_assert]):
      matching_files = array_ops.identity(matching_files)

    # TODO(b/240947712): Remove lazy import after this method is factored
    # out. Imported here to break the circular dependency
    # dataset_ops -> from_tensor_slices_op -> dataset_ops.
    # pylint: disable=g-import-not-at-top,protected-access
    from tensorflow.python.data.ops import from_tensor_slices_op
    result = from_tensor_slices_op._TensorSliceDataset(
        matching_files, is_files=True, name=name)
    # pylint: enable=g-import-not-at-top,protected-access
    if issubclass(Dataset, DatasetV1):
      result = DatasetV1Adapter(result)
    if shuffle:
      # NOTE(mrry): The shuffle buffer size must be greater than zero, but
      # the list of files might be empty.
      shuffle_buffer = math_ops.maximum(
          array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
      result = result.shuffle(shuffle_buffer, seed=seed, name=name)
    return result
def repeat(self, count=None, name=None) -> "DatasetV2":
  """Repeats this dataset so each original value is seen `count` times.

  For example, repeating `[1, 2, 3]` three times yields
  `[1, 2, 3, 1, 2, 3, 1, 2, 3]`.

  Note: if the input dataset depends on global state (e.g. a random
  number generator) or its output is non-deterministic (e.g. due to an
  upstream `shuffle`), different repetitions may produce different
  elements.

  Args:
    count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
      number of times the dataset should be repeated. The default behavior
      (if `count` is `None` or `-1`) is for the dataset be repeated
      indefinitely.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A new `Dataset` with the transformation applied as described above.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> repeat_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access,redefined-outer-name
  from tensorflow.python.data.ops import repeat_op

  return repeat_op._repeat(self, count, name)
  # pylint: enable=g-import-not-at-top,protected-access,redefined-outer-name
def enumerate(self, start=0, name=None) -> "DatasetV2":
  """Enumerates the elements of this dataset.

  Similar to Python's built-in `enumerate`: each element of the result
  is a `(position, element)` pair where positions count up from `start`
  as `tf.int64` scalars. The (nested) structure of the input dataset
  determines the structure of the `element` component.

  Args:
    start: A `tf.int64` scalar `tf.Tensor`, representing the start value
      for enumeration.
    name: Optional. A name for the tf.data operations used by `enumerate`.

  Returns:
    A new `Dataset` with the transformation applied as described above.
  """
  # An effectively unbounded counter; `zip` truncates it to this
  # dataset's length.
  int64_max = np.iinfo(dtypes.int64.as_numpy_dtype).max
  counter_ds = Dataset.range(start, int64_max, name=name)
  # Replicate the range component so that each split is enumerated
  # independently. This avoids the need for prohibitively expensive
  # cross-split coordination.
  counter_ds = apply_rewrite(counter_ds, "replicate_on_split")
  return Dataset.zip((counter_ds, self), name=name)
def shuffle(
    self, buffer_size, seed=None, reshuffle_each_iteration=True, name=None
) -> "DatasetV2":
  """Randomly shuffles the elements of this dataset.

  This dataset fills a buffer with `buffer_size` elements, then randomly
  samples elements from the buffer, replacing each selected element with
  the next incoming one. For a perfect (uniform) shuffle, the buffer
  size must be greater than or equal to the full size of the dataset —
  e.g. with 10,000 elements and `buffer_size=1000`, the first pick comes
  only from the first 1,000 elements.

  `reshuffle_each_iteration` controls whether the shuffle order differs
  per epoch: with `True`, iterating the dataset twice (via Python
  iteration in TF 2, or via `repeat` in TF 1.X) yields differently
  ordered epochs; with `False`, every iteration replays the same order.
  Avoid combining `shuffle(reshuffle_each_iteration=True)` with `take`
  and `skip` to split training/test sets — the whole dataset would be
  re-shuffled and re-split each epoch, leaking data. Use
  `tf.keras.utils.split_dataset` instead.

  To fully shuffle all the data, set
  `buffer_size=dataset.cardinality()`. Note this loads the entire
  dataset into memory and can OOM for large datasets, so reserve it for
  datasets known to fit in memory (e.g. datasets of filenames).

  Args:
    buffer_size: An int or `tf.int64` scalar `tf.Tensor`, representing the
      number of elements from this dataset from which the new dataset will
      sample. To uniformly shuffle the entire dataset, use
      `buffer_size=dataset.cardinality()`.
    seed: (Optional.) An int or `tf.int64` scalar `tf.Tensor`, representing
      the random seed that will be used to create the distribution. See
      `tf.random.set_seed` for behavior.
    reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
      that the dataset should be pseudorandomly reshuffled each time it is
      iterated over. (Defaults to `True`.)
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A new `Dataset` with the transformation applied as described above.
  """
  shuffled = shuffle_op._shuffle(  # pylint: disable=protected-access
      self, buffer_size, seed, reshuffle_each_iteration, name=name)
  return shuffled
def cache(self, filename="", name=None) -> "DatasetV2":
  """Caches the elements in this dataset.

  The first time the dataset is iterated over, its elements are cached —
  in the given file if `filename` is provided, otherwise in memory — and
  subsequent iterations read from the cache.

  Note: to guarantee the cache gets finalized, the input dataset must be
  iterated in its entirety (until it raises StopIteration); otherwise
  subsequent iterations may not use cached data.

  When caching to a file, the cached data persists across runs: even the
  first iteration reads from the cache file if it exists, and changes to
  the upstream pipeline have no effect until the cache file is removed
  or the filename changes.

  Note: `cache` produces exactly the same elements on every iteration.
  To randomize iteration order, call `shuffle` *after* `cache`.

  Args:
    filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
      directory on the filesystem to use for caching elements in this
      Dataset. If a filename is not provided, the dataset will be cached in
      memory.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A new `Dataset` with the transformation applied as described above.
  """
  # Imported inside the function body to break the circular dependency
  # dataset_ops -> cache_op -> dataset_ops.
  # pylint: disable=g-import-not-at-top,protected-access
  from tensorflow.python.data.ops import cache_op

  return cache_op._cache(self, filename, name)
  # pylint: enable=g-import-not-at-top,protected-access
def take(self, count, name=None) -> "DatasetV2":
"""Creates a `Dataset` with at most `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.take(3)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[0, 1, 2]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# take_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import take_op
return take_op._take(self, count, name=name)
# pylint: enable=g-import-not-at-top,protected-access
def skip(self, count, name=None) -> "DatasetV2":
"""Creates a `Dataset` that skips `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.skip(7)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[7, 8, 9]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# skip_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import skip_op
return skip_op._skip(self, count, name)
# pylint: enable=g-import-not-at-top,protected-access
def shard(self, num_shards, index, name=None) -> "DatasetV2":
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
`shard` is deterministic. The Dataset produced by `A.shard(n, i)` will
contain all elements of A whose index mod n = i.
>>> A = tf.data.Dataset.range(10)
>>> B = A.shard(num_shards=3, index=0)
>>> [a.item() for a in B.as_numpy_iterator()]
[0, 3, 6, 9]
>>> C = A.shard(num_shards=3, index=1)
>>> [a.item() for a in C.as_numpy_iterator()]
[1, 4, 7]
>>> D = A.shard(num_shards=3, index=2)
>>> [a.item() for a in D.as_numpy_iterator()]
[2, 5, 8]
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can shard elements as follows:
```python
d = tf.data.TFRecordDataset(input_file)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(pattern, shuffle=False)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
Raises:
InvalidArgumentError: if `num_shards` or `index` are illegal values.
Note: error checking is done on a best-effort basis, and errors aren't
guaranteed to be caught upon dataset creation. (e.g. providing in a
placeholder tensor bypasses the early checking, and will instead result
in an error during a session.run call.)
"""
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import shard_op
return shard_op._shard(self, num_shards, index, name=name)
# pylint: enable=g-import-not-at-top,protected-access
def save(self,
path,
compression=None,
shard_func=None,
checkpoint_args=None):
"""Saves the content of the given dataset.
Example usage:
>>> import tempfile
>>> path = os.path.join(tempfile.gettempdir(), "saved_data")
>>> # Save a dataset
>>> dataset = tf.data.Dataset.range(2)
>>> dataset.save(path)
>>> new_dataset = tf.data.Dataset.load(path)
>>> for elem in new_dataset:
... print(elem)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
The saved dataset is saved in multiple file "shards". By default, the
dataset output is divided to shards in a round-robin fashion but custom
sharding can be specified via the `shard_func` function. For example, you
can save the dataset to using a single shard as follows:
```python
dataset = make_dataset()
def custom_shard_func(element):
return np.int64(0)
dataset.save(
path="/path/to/data", ..., shard_func=custom_shard_func)
```
To enable checkpointing, pass in `checkpoint_args` to the `save` method
as follows:
```python
dataset = tf.data.Dataset.range(100)
save_dir = "..."
checkpoint_prefix = "..."
step_counter = tf.Variable(0, trainable=False)
checkpoint_args = {
"checkpoint_interval": 50,
"step_counter": step_counter,
"directory": checkpoint_prefix,
"max_to_keep": 20,
}
dataset.save(dataset, save_dir, checkpoint_args=checkpoint_args)
```
NOTE: The directory layout and file format used for saving the dataset is
considered an implementation detail and may change. For this reason,
datasets saved through `tf.data.Dataset.save` should only be consumed
through `tf.data.Dataset.load`, which is guaranteed to be
backwards compatible.
Args:
path: Required. A directory to use for saving the dataset.
compression: Optional. The algorithm to use to compress data when writing
it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
shard_func: Optional. A function to control the mapping of dataset
elements to file shards. The function is expected to map elements of
the input dataset to int64 shard IDs. If present, the function will be
traced and executed as graph computation.
checkpoint_args: Optional args for checkpointing which will be passed into
the `tf.train.CheckpointManager`. If `checkpoint_args` are not
specified, then checkpointing will not be performed. The `save()`
implementation creates a `tf.train.Checkpoint` object internally, so
users should not set the `checkpoint` argument in `checkpoint_args`.
Returns:
An operation which when executed performs the save. When writing
checkpoints, returns None. The return value is useful in unit tests.
Raises:
ValueError if `checkpoint` is passed into `checkpoint_args`.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> save_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import save_op
return save_op._save(self, path, compression, shard_func, checkpoint_args)
# pylint: enable=g-import-not-at-top,protected-access
@staticmethod
def load(
path, element_spec=None, compression=None, reader_func=None, wait=False,
) -> "DatasetV2":
"""Loads a previously saved dataset.
Example usage:
>>> import tempfile
>>> path = os.path.join(tempfile.gettempdir(), "saved_data")
>>> # Save a dataset
>>> dataset = tf.data.Dataset.range(2)
>>> tf.data.Dataset.save(dataset, path)
>>> new_dataset = tf.data.Dataset.load(path)
>>> for elem in new_dataset:
... print(elem)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
If the default option of sharding the saved dataset was used, the element
order of the saved dataset will be preserved when loading it.
The `reader_func` argument can be used to specify a custom order in which
elements should be loaded from the individual shards. The `reader_func` is
expected to take a single argument -- a dataset of datasets, each containing
elements of one of the shards -- and return a dataset of elements. For
example, the order of shards can be shuffled when loading them as follows:
```python
def custom_reader_func(datasets):
datasets = datasets.shuffle(NUM_SHARDS)
return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)
dataset = tf.data.Dataset.load(
path="/path/to/data", ..., reader_func=custom_reader_func)
```
Args:
path: Required. A path pointing to a previously saved dataset.
element_spec: Optional. A nested structure of `tf.TypeSpec` objects
matching the structure of an element of the saved dataset and specifying
the type of individual element components. If not provided, the nested
structure of `tf.TypeSpec` saved with the saved dataset is used. Note
that this argument is required in graph mode.
compression: Optional. The algorithm to use to decompress the data when
reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
reader_func: Optional. A function to control how to read data from shards.
If present, the function will be traced and executed as graph
computation.
wait: If `True`, for snapshots written with `distributed_save`, it reads
the snapshot while it is being written. For snapshots written with
regular `save`, it waits for the snapshot until it's finished. The
default is `False` for backward compatibility. Users of
`distributed_save` are recommended to set it to `True`.
Returns:
A `tf.data.Dataset` instance.
Raises:
FileNotFoundError: If `element_spec` is not specified and the saved nested
structure of `tf.TypeSpec` can not be located with the saved dataset.
ValueError: If `element_spec` is not specified and the method is executed
in graph mode.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> load_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import load_op
return load_op._load(
path=path,
element_spec=element_spec,
compression=compression,
reader_func=reader_func,
wait=wait)
# pylint: enable=g-import-not-at-top,protected-access
def batch(
self,
batch_size,
drop_remainder=False,
num_parallel_calls=None,
deterministic=None,
name=None,
) -> "DatasetV2":
"""Combines consecutive elements of this dataset into batches.
>>> dataset = tf.data.Dataset.range(8)
>>> dataset = dataset.batch(3)
>>> list(dataset.as_numpy_iterator())
[array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]
>>> dataset = tf.data.Dataset.range(8)
>>> dataset = dataset.batch(3, drop_remainder=True)
>>> list(dataset.as_numpy_iterator())
[array([0, 1, 2]), array([3, 4, 5])]
The components of the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Note: If your program requires data to have a statically known shape (e.g.,
when using XLA), you should use `drop_remainder=True`. Without
`drop_remainder=True` the shape of the output dataset will have an unknown
leading dimension due to the possibility of a smaller final batch.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to compute asynchronously in
parallel.
If not specified, batches will be computed sequentially. If the value
`tf.data.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available resources.
deterministic: (Optional.) When `num_parallel_calls` is specified, if this
boolean is specified (`True` or `False`), it controls the order in which
the transformation produces elements. If set to `False`, the
transformation is allowed to yield elements out of order to trade
determinism for performance. If not specified, the
`tf.data.Options.deterministic` option (`True` by default) controls the
behavior.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> batch_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access,redefined-outer-name
from tensorflow.python.data.ops import batch_op
return batch_op._batch(self, batch_size, drop_remainder, num_parallel_calls,
deterministic, name)
# pylint: enable=g-import-not-at-top,protected-access,redefined-outer-name
def padded_batch(
self,
batch_size,
padded_shapes=None,
padding_values=None,
drop_remainder=False,
name=None,
) -> "DatasetV2":
"""Combines consecutive elements of this dataset into padded batches.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the components of the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padded_shapes`. The `padded_shapes` argument
determines the resulting shape for each dimension of each component in an
output element:
* If the dimension is a constant, the component will be padded out to that
length in that dimension.
* If the dimension is unknown, the component will be padded out to the
maximum length of all elements in that dimension.
>>> A = (tf.data.Dataset
... .range(1, 5, output_type=tf.int32)
... .map(lambda x: tf.fill([x], x)))
>>> # Pad to the smallest per-batch size that fits all elements.
>>> B = A.padded_batch(2)
>>> for element in B.as_numpy_iterator():
... print(element)
[[1 0]
[2 2]]
[[3 3 3 0]
[4 4 4 4]]
>>> # Pad to a fixed size.
>>> C = A.padded_batch(2, padded_shapes=5)
>>> for element in C.as_numpy_iterator():
... print(element)
[[1 0 0 0 0]
[2 2 0 0 0]]
[[3 3 3 0 0]
[4 4 4 4 0]]
>>> # Pad with a custom value.
>>> D = A.padded_batch(2, padded_shapes=5, padding_values=-1)
>>> for element in D.as_numpy_iterator():
... print(element)
[[ 1 -1 -1 -1 -1]
[ 2 2 -1 -1 -1]]
[[ 3 3 3 -1 -1]
[ 4 4 4 4 -1]]
>>> # Components of nested elements can be padded independently.
>>> elements = [([1, 2, 3], [10]),
... ([4, 5], [11, 12])]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: iter(elements), (tf.int32, tf.int32))
>>> # Pad the first component of the tuple to length 4, and the second
>>> # component to the smallest size that fits.
>>> dataset = dataset.padded_batch(2,
... padded_shapes=([4], [None]),
... padding_values=(-1, 100))
>>> list(dataset.as_numpy_iterator())
[(array([[ 1, 2, 3, -1], [ 4, 5, -1, -1]], dtype=int32),
array([[ 10, 100], [ 11, 12]], dtype=int32))]
>>> # Pad with a single value and multiple components.
>>> E = tf.data.Dataset.zip((A, A)).padded_batch(2, padding_values=-1)
>>> for element in E.as_numpy_iterator():
... print(element)
(array([[ 1, -1],
[ 2, 2]], dtype=int32), array([[ 1, -1],
[ 2, 2]], dtype=int32))
(array([[ 3, 3, 3, -1],
[ 4, 4, 4, 4]], dtype=int32), array([[ 3, 3, 3, -1],
[ 4, 4, 4, 4]], dtype=int32))
See also `tf.data.experimental.dense_to_sparse_batch`, which combines
elements that may have different shapes into a `tf.sparse.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: (Optional.) A (nested) structure of `tf.TensorShape` or
`tf.int64` vector tensor-like objects representing the shape to which
the respective component of each input element should be padded prior
to batching. Any unknown dimensions will be padded to the maximum size
of that dimension in each batch. If unset, all dimensions of all
components are padded to the maximum size in the batch. `padded_shapes`
must be set if any component has an unknown rank.
padding_values: (Optional.) A (nested) structure of scalar-shaped
`tf.Tensor`, representing the padding values to use for the respective
components. None represents that the (nested) structure should be padded
with default values. Defaults are `0` for numeric types and the empty
string for string types. The `padding_values` should have the same
(nested) structure as the input dataset. If `padding_values` is a single
element and the input dataset has multiple components, then the same
`padding_values` will be used to pad every component of the dataset.
If `padding_values` is a scalar, then its value will be broadcasted
to match the shape of each component.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
Raises:
ValueError: If a component has an unknown rank, and the `padded_shapes`
argument is not set.
TypeError: If a component is of an unsupported type. The list of supported
types is documented in
https://www.tensorflow.org/guide/data#dataset_structure.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# padded_batch_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import padded_batch_op
return padded_batch_op._padded_batch(self, batch_size, padded_shapes,
padding_values, drop_remainder, name)
# pylint: enable=g-import-not-at-top,protected-access
def ragged_batch(
self,
batch_size,
drop_remainder=False,
row_splits_dtype=dtypes.int64,
name=None,
) -> "DatasetV2":
"""Combines consecutive elements of this dataset into `tf.RaggedTensor`s.
Like `tf.data.Dataset.batch`, the components of the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes:
* If an input element is a `tf.Tensor` whose static `tf.TensorShape` is
fully defined, then it is batched as normal.
* If an input element is a `tf.Tensor` whose static `tf.TensorShape`
contains one or more axes with unknown size (i.e., `shape[i]=None`), then
the output will contain a `tf.RaggedTensor` that is ragged up to any of such
dimensions.
* If an input element is a `tf.RaggedTensor` or any other type, then it is
batched as normal.
Example:
>>> dataset = tf.data.Dataset.range(6)
>>> dataset = dataset.map(lambda x: tf.range(x))
>>> dataset.element_spec.shape
TensorShape([None])
>>> dataset = dataset.ragged_batch(2)
>>> for batch in dataset:
... print(batch)
<tf.RaggedTensor [[], [0]]>
<tf.RaggedTensor [[0, 1], [0, 1, 2]]>
<tf.RaggedTensor [[0, 1, 2, 3], [0, 1, 2, 3, 4]]>
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
row_splits_dtype: The dtype that should be used for the `row_splits` of
any new ragged tensors. Existing `tf.RaggedTensor` elements do not have
their row_splits dtype changed.
name: (Optional.) A string indicating a name for the `tf.data` operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# ragged_batch_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import ragged_batch_op
return ragged_batch_op._ragged_batch(self, batch_size, drop_remainder,
row_splits_dtype, name)
# pylint: enable=g-import-not-at-top,protected-access
def sparse_batch(self, batch_size, row_shape, name=None) -> "DatasetV2":
"""Combines consecutive elements into `tf.sparse.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.sparse.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.sparse.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(
batch_size=2, row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.sparse.SparseTensor`. Each element of this dataset must have the
same rank as `row_shape`, and must have size less than or equal to
`row_shape` in each dimension.
name: (Optional.) A string indicating a name for the `tf.data` operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# sparse_batch_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import sparse_batch_op
return sparse_batch_op._sparse_batch(self, batch_size, row_shape, name)
# pylint: disable=g-import-not-at-top,protected-access
def map(
self,
map_func,
num_parallel_calls=None,
deterministic=None,
synchronous=None,
use_unbounded_threadpool=False,
name=None,
) -> "DatasetV2":
"""Maps `map_func` across the elements of this dataset.
This transformation applies `map_func` to each element of this dataset, and
returns a new dataset containing the transformed elements, in the same
order as they appeared in the input. `map_func` can be used to change both
the values and the structure of a dataset's elements. Supported structure
constructs are documented
[here](https://www.tensorflow.org/guide/data#dataset_structure).
For example, `map` can be used for adding 1 to each element, or projecting a
subset of element components.
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> dataset = dataset.map(lambda x: x + 1)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[2, 3, 4, 5, 6]
The input signature of `map_func` is determined by the structure of each
element in this dataset.
>>> dataset = Dataset.range(5)
>>> # `map_func` takes a single argument of type `tf.Tensor` with the same
>>> # shape and dtype.
>>> result = dataset.map(lambda x: x + 1)
>>> # Each element is a tuple containing two `tf.Tensor` objects.
>>> elements = [(1, "foo"), (2, "bar"), (3, "baz")]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, (tf.int32, tf.string))
>>> # `map_func` takes two arguments of type `tf.Tensor`. This function
>>> # projects out just the first component.
>>> result = dataset.map(lambda x_int, y_str: x_int)
>>> [a.item() for a in result.as_numpy_iterator()]
[1, 2, 3]
>>> # Each element is a dictionary mapping strings to `tf.Tensor` objects.
>>> elements = ([{"a": 1, "b": "foo"},
... {"a": 2, "b": "bar"},
... {"a": 3, "b": "baz"}])
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, {"a": tf.int32, "b": tf.string})
>>> # `map_func` takes a single argument of type `dict` with the same keys
>>> # as the elements.
>>> result = dataset.map(lambda d: str(d["a"]) + d["b"])
The value or values returned by `map_func` determine the structure of each
element in the returned dataset.
>>> dataset = tf.data.Dataset.range(3)
>>> # `map_func` returns two `tf.Tensor` objects.
>>> def g(x):
... return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
>>> result = dataset.map(g)
>>> result.element_spec
(TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(3,), \
dtype=tf.string, name=None))
>>> # Python primitives, lists, and NumPy arrays are implicitly converted to
>>> # `tf.Tensor`.
>>> def h(x):
... return 37.0, ["Foo", "Bar"], np.array([1.0, 2.0], dtype=np.float64)
>>> result = dataset.map(h)
>>> result.element_spec
(TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(2,), \
dtype=tf.string, name=None), TensorSpec(shape=(2,), dtype=tf.float64, \
name=None))
>>> # `map_func` can return nested structures.
>>> def i(x):
... return (37.0, [42, 16]), "foo"
>>> result = dataset.map(i)
>>> result.element_spec
((TensorSpec(shape=(), dtype=tf.float32, name=None),
TensorSpec(shape=(2,), dtype=tf.int32, name=None)),
TensorSpec(shape=(), dtype=tf.string, name=None))
`map_func` can accept as arguments and return any type of dataset element.
Note that irrespective of the context in which `map_func` is defined (eager
vs. graph), tf.data traces the function and executes it as a graph. To use
Python code inside of the function you have a few options:
1) Rely on AutoGraph to convert Python code into an equivalent graph
computation. The downside of this approach is that AutoGraph can convert
some but not all Python code.
2) Use `tf.py_function`, which allows you to write arbitrary Python code but
will generally result in worse performance than 1). For example:
>>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
>>> # transform a string tensor to upper case string using a Python function
>>> def upper_case_fn(t: tf.Tensor):
... return t.numpy().decode('utf-8').upper()
>>> d = d.map(lambda x: tf.py_function(func=upper_case_fn,
... inp=[x], Tout=tf.string))
>>> list(d.as_numpy_iterator())
[b'HELLO', b'WORLD']
3) Use `tf.numpy_function`, which also allows you to write arbitrary
Python code. Note that `tf.py_function` accepts `tf.Tensor` whereas
`tf.numpy_function` accepts numpy arrays and returns only numpy arrays.
For example:
>>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
>>> def upper_case_fn(t: np.ndarray):
... return t.decode('utf-8').upper()
>>> d = d.map(lambda x: tf.numpy_function(func=upper_case_fn,
... inp=[x], Tout=tf.string))
>>> list(d.as_numpy_iterator())
[b'HELLO', b'WORLD']
Note that the use of `tf.numpy_function` and `tf.py_function`
in general precludes the possibility of executing user-defined
transformations in parallel (because of Python GIL).
Performance can often be improved by setting `num_parallel_calls` so that
`map` will use multiple threads to process elements. If deterministic order
isn't required, it can also improve performance to set
`deterministic=False`.
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> dataset = dataset.map(lambda x: x + 1,
... num_parallel_calls=tf.data.AUTOTUNE,
... deterministic=False)
The order of elements yielded by this transformation is deterministic if
`deterministic=True`. If `map_func` contains stateful operations and
`num_parallel_calls > 1`, the order in which that state is accessed is
undefined, so the values of output elements may not be deterministic
regardless of the `deterministic` flag value.
Args:
map_func: A function mapping a dataset element to another dataset element.
num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If the value `tf.data.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU. If not specified, the
`tf.data.Options.experimental_optimization.map_parallelization` option
(`True` by default) controls whether the map will run as with
`tf.data.AUTOTUNE` or run sequentially.
deterministic: (Optional.) When `num_parallel_calls` is specified, if this
boolean is specified (`True` or `False`), it controls the order in which
the transformation produces elements. If set to `False`, the
transformation is allowed to yield elements out of order to trade
determinism for performance. If not specified, the
`tf.data.Options.deterministic` option (`True` by default) controls the
behavior.
synchronous: (Optional.) Whether to force the map transformation to run
synchronously. This only matters when
`options.experimental_optimization.map_parallelization=True`. That
option would normally change the map to run with
`num_parallel_calls=tf.data.AUTOTUNE`, but if `synchronous=True` is
specified, the map will not be parallelized at all. This is useful for
saving memory, since even setting `num_parallel_calls=1` will cause one
batch to be buffered, while with `synchronous=True` the map
transformation doesn't buffer anything.
use_unbounded_threadpool: (Optional.) By default, map functions run in a
limited threadpool based on the number of cores on the machine. This
efficient for CPU-heavy processing, but if the map function performs IO
it is better to use an unbounded threadpool by setting it to `True`. It
is `False` by default.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> map_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import map_op
return map_op._map_v2(
self,
map_func,
num_parallel_calls=num_parallel_calls,
deterministic=deterministic,
synchronous=synchronous,
use_unbounded_threadpool=use_unbounded_threadpool,
name=name,
)
# pylint: enable=g-import-not-at-top,protected-access
def flat_map(self, map_func, name=None) -> "DatasetV2":
"""Maps `map_func` across this dataset and flattens the result.
The type signature is:
```
def flat_map(
self: Dataset[T],
map_func: Callable[[T], Dataset[S]]
) -> Dataset[S]
```
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
>>> dataset = tf.data.Dataset.from_tensor_slices(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> dataset = dataset.flat_map(tf.data.Dataset.from_tensor_slices)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[1, 2, 3, 4, 5, 6, 7, 8, 9]
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`
Args:
map_func: A function mapping a dataset element to a dataset.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> flat_map_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import flat_map_op
return flat_map_op._flat_map(self, map_func, name=name)
# pylint: enable=g-import-not-at-top,protected-access
def ignore_errors(self, log_warning=False, name=None) -> "DatasetV2":
"""Drops elements that cause errors.
>>> dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
>>> dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, ""))
>>> list(dataset.as_numpy_iterator())
Traceback (most recent call last):
...
InvalidArgumentError: ... Tensor had Inf values
>>> dataset = dataset.ignore_errors()
>>> list(dataset.as_numpy_iterator())
[1.0, 0.5, 0.25]
Args:
log_warning: (Optional.) A bool indicating whether or not ignored errors
should be logged to stderr. Defaults to `False`.
name: (Optional.) A string indicating a name for the `tf.data` operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# ignore_errors_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import ignore_errors_op
return ignore_errors_op._ignore_errors(self, log_warning, name)
# pylint: enable=g-import-not-at-top,protected-access
  def interleave(
      self,
      map_func,
      cycle_length=None,
      block_length=None,
      num_parallel_calls=None,
      deterministic=None,
      name=None,
  ) -> "DatasetV2":
    """Maps `map_func` across this dataset, and interleaves the results.

    The type signature is:

    ```
    def interleave(
      self: Dataset[T],
      map_func: Callable[[T], Dataset[S]]
    ) -> Dataset[S]
    ```

    For example, you can use `Dataset.interleave()` to process many input files
    concurrently:

    >>> # Preprocess 4 files concurrently, and interleave blocks of 16 records
    >>> # from each file.
    >>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
    ...              "/var/data/file3.txt", "/var/data/file4.txt"]
    >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
    >>> def parse_fn(filename):
    ...   return tf.data.Dataset.range(10)
    >>> dataset = dataset.interleave(lambda x:
    ...     tf.data.TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
    ...     cycle_length=4, block_length=16)

    The `cycle_length` and `block_length` arguments control the order in which
    elements are produced. `cycle_length` controls the number of input elements
    that are processed concurrently. If you set `cycle_length` to 1, this
    transformation will handle one input element at a time, and will produce
    identical results to `tf.data.Dataset.flat_map`. In general,
    this transformation will apply `map_func` to `cycle_length` input elements,
    open iterators on the returned `Dataset` objects, and cycle through them
    producing `block_length` consecutive elements from each iterator, and
    consuming the next input element each time it reaches the end of an
    iterator.

    For example:

    >>> dataset = Dataset.range(1, 6)  # ==> [ 1, 2, 3, 4, 5 ]
    >>> # NOTE: New lines indicate "block" boundaries.
    >>> dataset = dataset.interleave(
    ...     lambda x: Dataset.from_tensors(x).repeat(6),
    ...     cycle_length=2, block_length=4)
    >>> [a.item() for a in dataset.as_numpy_iterator()]
    [1, 1, 1, 1,
     2, 2, 2, 2,
     1, 1,
     2, 2,
     3, 3, 3, 3,
     4, 4, 4, 4,
     3, 3,
     4, 4,
     5, 5, 5, 5,
     5, 5]

    Note: The order of elements yielded by this transformation is
    deterministic, as long as `map_func` is a pure function and
    `deterministic=True`. If `map_func` contains any stateful operations, the
    order in which that state is accessed is undefined.

    Performance can often be improved by setting `num_parallel_calls` so that
    `interleave` will use multiple threads to fetch elements. If determinism
    isn't required, it can also improve performance to set
    `deterministic=False`.

    >>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
    ...              "/var/data/file3.txt", "/var/data/file4.txt"]
    >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
    >>> dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x),
    ...     cycle_length=4, num_parallel_calls=tf.data.AUTOTUNE,
    ...     deterministic=False)

    Args:
      map_func: A function that takes a dataset element and returns a
        `tf.data.Dataset`.
      cycle_length: (Optional.) The number of input elements that will be
        processed concurrently. If not set, the tf.data runtime decides what it
        should be based on available CPU. If `num_parallel_calls` is set to
        `tf.data.AUTOTUNE`, the `cycle_length` argument identifies
        the maximum degree of parallelism.
      block_length: (Optional.) The number of consecutive elements to produce
        from each input element before cycling to another input element. If not
        set, defaults to 1.
      num_parallel_calls: (Optional.) If specified, the implementation creates a
        threadpool, which is used to fetch inputs from cycle elements
        asynchronously and in parallel. The default behavior is to fetch inputs
        from cycle elements synchronously with no parallelism. If the value
        `tf.data.AUTOTUNE` is used, then the number of parallel
        calls is set dynamically based on available CPU.
      deterministic: (Optional.) When `num_parallel_calls` is specified, if this
        boolean is specified (`True` or `False`), it controls the order in which
        the transformation produces elements. If set to `False`, the
        transformation is allowed to yield elements out of order to trade
        determinism for performance. If not specified, the
        `tf.data.Options.deterministic` option (`True` by default) controls the
        behavior.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    """
    # Loaded lazily due to a circular dependency (
    # dataset_ops -> interleave_op -> dataset_ops).
    # pylint: disable=g-import-not-at-top,protected-access
    from tensorflow.python.data.ops import interleave_op
    # All configuration is forwarded positionally; defaults (e.g. block_length
    # -> 1) are resolved inside `interleave_op`.
    return interleave_op._interleave(self, map_func, cycle_length, block_length,
                                     num_parallel_calls, deterministic, name)
    # pylint: enable=g-import-not-at-top,protected-access
def filter(self, predicate, name=None) -> "DatasetV2":
"""Filters this dataset according to `predicate`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.filter(lambda x: x < 3)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[1, 2]
>>> # `tf.math.equal(x, y)` is required for equality comparison
>>> def filter_fn(x):
... return tf.math.equal(x, 1)
>>> dataset = dataset.filter(filter_fn)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[1]
Args:
predicate: A function mapping a dataset element to a boolean.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> filter_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import filter_op
return filter_op._filter(self, predicate, name)
# pylint: enable=g-import-not-at-top,protected-access
def apply(self, transformation_func) -> "DatasetV2":
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
>>> dataset = tf.data.Dataset.range(100)
>>> def dataset_fn(ds):
... return ds.filter(lambda x: x < 5)
>>> dataset = dataset.apply(dataset_fn)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[0, 1, 2, 3, 4]
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
dataset = transformation_func(self)
if not isinstance(dataset, data_types.DatasetV2):
raise TypeError(
f"`transformation_func` must return a `tf.data.Dataset` object. "
f"Got {type(dataset)}.")
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
  def window(
      self, size, shift=None, stride=1, drop_remainder=False, name=None
  ) -> "DatasetV2":
    """Returns a dataset of "windows".

    Each "window" is a dataset that contains a subset of elements of the
    input dataset. These are finite datasets of size `size` (or possibly fewer
    if there are not enough input elements to fill the window and
    `drop_remainder` evaluates to `False`).

    For example:

    >>> dataset = tf.data.Dataset.range(7).window(3)
    >>> for window in dataset:
    ...   print(window)
    <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int64, name=None)>
    <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int64, name=None)>
    <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int64, name=None)>

    Since windows are datasets, they can be iterated over:

    >>> for window in dataset:
    ...   print([a.item() for a in window.as_numpy_iterator()])
    [0, 1, 2]
    [3, 4, 5]
    [6]

    #### Shift

    The `shift` argument determines the number of input elements to shift
    between the start of each window. If windows and elements are both numbered
    starting at 0, the first element in window `k` will be element `k * shift`
    of the input dataset. In particular, the first element of the first window
    will always be the first element of the input dataset.

    >>> dataset = tf.data.Dataset.range(7).window(3, shift=1,
    ...                                           drop_remainder=True)
    >>> for window in dataset:
    ...   print([a.item() for a in window.as_numpy_iterator()])
    [0, 1, 2]
    [1, 2, 3]
    [2, 3, 4]
    [3, 4, 5]
    [4, 5, 6]

    #### Stride

    The `stride` argument determines the stride between input elements within a
    window.

    >>> dataset = tf.data.Dataset.range(7).window(3, shift=1, stride=2,
    ...                                           drop_remainder=True)
    >>> for window in dataset:
    ...   print([a.item() for a in window.as_numpy_iterator()])
    [0, 2, 4]
    [1, 3, 5]
    [2, 4, 6]

    #### Nested elements

    When the `window` transformation is applied to a dataset whose elements are
    nested structures, it produces a dataset where the elements have the same
    nested structure but each leaf is replaced by a window. In other words,
    the nesting is applied outside of the windows as opposed to inside of them.

    The type signature is:

    ```
    def window(
      self: Dataset[Nest[T]], ...
    ) -> Dataset[Nest[Dataset[T]]]
    ```

    Applying `window` to a `Dataset` of tuples gives a tuple of windows:

    >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2, 3, 4, 5],
    ...                                               [6, 7, 8, 9, 10]))
    >>> dataset = dataset.window(2)
    >>> windows = next(iter(dataset))
    >>> windows
    (<...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int32, name=None)>,
     <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int32, name=None)>)

    >>> def to_numpy(ds):
    ...   return [a.item() for a in ds.as_numpy_iterator()]
    >>>
    >>> for windows in dataset:
    ...   print(to_numpy(windows[0]), to_numpy(windows[1]))
    [1, 2] [6, 7]
    [3, 4] [8, 9]
    [5] [10]

    Applying `window` to a `Dataset` of dictionaries gives a dictionary of
    `Datasets`:

    >>> dataset = tf.data.Dataset.from_tensor_slices({'a': [1, 2, 3],
    ...                                               'b': [4, 5, 6],
    ...                                               'c': [7, 8, 9]})
    >>> dataset = dataset.window(2)
    >>> def to_numpy(ds):
    ...   return [a.item() for a in ds.as_numpy_iterator()]
    >>>
    >>> for windows in dataset:
    ...   print(tf.nest.map_structure(to_numpy, windows))
    {'a': [1, 2], 'b': [4, 5], 'c': [7, 8]}
    {'a': [3], 'b': [6], 'c': [9]}

    #### Flatten a dataset of windows

    The `Dataset.flat_map` and `Dataset.interleave` methods can be used to
    flatten a dataset of windows into a single dataset.

    The argument to `flat_map` is a function that takes an element from the
    dataset and returns a `Dataset`. `flat_map` chains together the resulting
    datasets sequentially.

    For example, to turn each window into a dense tensor:

    >>> dataset = tf.data.Dataset.range(7).window(3, shift=1,
    ...                                           drop_remainder=True)
    >>> batched = dataset.flat_map(lambda x: x.batch(3))
    >>> for batch in batched:
    ...   print(batch.numpy())
    [0 1 2]
    [1 2 3]
    [2 3 4]
    [3 4 5]
    [4 5 6]

    Args:
      size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
        of the input dataset to combine into a window. Must be positive.
      shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        number of input elements by which the window moves in each iteration.
        Defaults to `size`. Must be positive.
      stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        stride of the input elements in the sliding window. Must be positive.
        The default value of 1 means "retain every input element".
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last windows should be dropped if their size is smaller than
        `size`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    """
    # Loaded lazily due to a circular dependency (dataset_ops -> window_op ->
    # dataset_ops).
    # pylint: disable=g-import-not-at-top,protected-access
    from tensorflow.python.data.ops import window_op
    # The `shift=None` default is resolved to `size` downstream in `window_op`.
    return window_op._window(self, size, shift, stride, drop_remainder, name)
    # pylint: enable=g-import-not-at-top,protected-access
  def reduce(self, initial_state, reduce_func, name=None):
    """Reduces the input dataset to a single element.

    The transformation calls `reduce_func` successively on every element of
    the input dataset until the dataset is exhausted, aggregating information in
    its internal state. The `initial_state` argument is used for the initial
    state and the final state is returned as the result.

    >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x +
    ...     1).numpy().item()
    5
    >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x +
    ...     y).numpy().item()
    10

    Args:
      initial_state: An element representing the initial state of the
        transformation.
      reduce_func: A function that maps `(old_state, input_element)` to
        `new_state`. It must take two arguments and return a new element The
        structure of `new_state` must match the structure of `initial_state`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A dataset element corresponding to the final state of the transformation.
    """
    with ops.name_scope("initial_state"):
      initial_state = structure.normalize_element(initial_state)
    state_structure = structure.type_spec_from_value(initial_state)

    # Iteratively rerun the reduce function until reaching a fixed point on
    # `state_structure`. Each pass may "weaken" tensor shapes in the state
    # (a dimension that changes between steps becomes unknown), which then
    # requires re-tracing `reduce_func` against the weakened input structure.
    need_to_rerun = True
    while need_to_rerun:

      # `add_to_graph=False` defers graph registration until the state
      # structure has stabilized; the final traced function is added to the
      # graph after the loop below.
      wrapped_func = structured_function.StructuredFunctionWrapper(
          reduce_func,
          "reduce()",
          input_structure=(state_structure, self.element_spec),
          add_to_graph=False)

      # Extract and validate class information from the returned values.
      # The returned state's component classes may be subclasses of the
      # initial state's, but not unrelated classes.
      output_classes = wrapped_func.output_classes
      state_classes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
          state_structure)
      for new_state_class, state_class in zip(
          nest.flatten(output_classes), nest.flatten(state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              f"The element classes for the new state must match the initial "
              f"state. Expected {state_classes} but got "
              f"{wrapped_func.output_classes}.")

      # Extract and validate type information from the returned values.
      # Unlike classes, dtypes must match exactly.
      output_types = wrapped_func.output_types
      state_types = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
          state_structure)
      for new_state_type, state_type in zip(
          nest.flatten(output_types), nest.flatten(state_types)):
        if new_state_type != state_type:
          raise TypeError(
              f"The element types for the new state must match the initial "
              f"state. Expected {state_types} but got "
              f"{wrapped_func.output_types}.")

      # Extract shape information from the returned values. Pairwise-relax
      # old vs. new shapes: any dimension on which they disagree is replaced
      # by an unknown dimension in the "weakened" shape.
      output_shapes = wrapped_func.output_shapes
      state_shapes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
          state_structure)
      flat_state_shapes = nest.flatten(state_shapes)
      flat_new_state_shapes = nest.flatten(output_shapes)
      weakened_state_shapes = [
          original.most_specific_compatible_shape(new)
          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
      ]

      # Rerun only if the weakening actually changed a previously-known shape;
      # otherwise `state_structure` is at a fixed point and tracing is done.
      need_to_rerun = False
      for original_shape, weakened_shape in zip(flat_state_shapes,
                                                weakened_state_shapes):
        if original_shape.ndims is not None and (
            weakened_shape.ndims is None or
            original_shape.as_list() != weakened_shape.as_list()):
          need_to_rerun = True
          break

      if need_to_rerun:
        # TODO(b/110122868): Support a "most specific compatible structure"
        # method for combining structures, to avoid using legacy structures
        # here.
        state_structure = structure.convert_legacy_structure(
            state_types,
            nest.pack_sequence_as(state_shapes, weakened_state_shapes),
            state_classes)

    # The state structure is now stable; register the final traced function.
    reduce_func = wrapped_func.function
    reduce_func.add_to_graph(ops.get_default_graph())

    dataset = self._apply_debug_options()

    # pylint: disable=protected-access
    metadata = dataset_metadata_pb2.Metadata()
    if name:
      metadata.name = _validate_and_encode(name)
    return structure.from_compatible_tensor_list(
        state_structure,
        gen_dataset_ops.reduce_dataset(
            dataset._variant_tensor,
            structure.to_tensor_list(state_structure, initial_state),
            reduce_func.captured_inputs,
            f=reduce_func,
            output_shapes=structure.get_flat_tensor_shapes(state_structure),
            output_types=structure.get_flat_tensor_types(state_structure),
            metadata=metadata.SerializeToString()))
def get_single_element(self, name=None):
"""Returns the single element of the `dataset`.
The function enables you to use a `tf.data.Dataset` in a stateless
"tensor-in tensor-out" expression, without creating an iterator.
This facilitates the ease of data transformation on tensors using the
optimized `tf.data.Dataset` abstraction on top of them.
For example, lets consider a `preprocessing_fn` which would take as an
input the raw features and returns the processed feature along with
it's label.
```python
def preprocessing_fn(raw_feature):
# ... the raw_feature is preprocessed as per the use-case
return feature
raw_features = ... # input batch of BATCH_SIZE elements.
dataset = (tf.data.Dataset.from_tensor_slices(raw_features)
.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
.batch(BATCH_SIZE))
processed_features = dataset.get_single_element()
```
In the above example, the `raw_features` tensor of length=BATCH_SIZE
was converted to a `tf.data.Dataset`. Next, each of the `raw_feature` was
mapped using the `preprocessing_fn` and the processed features were
grouped into a single batch. The final `dataset` contains only one element
which is a batch of all the processed features.
NOTE: The `dataset` should contain only one element.
Now, instead of creating an iterator for the `dataset` and retrieving the
batch of features, the `tf.data.get_single_element()` function is used
to skip the iterator creation process and directly output the batch of
features.
This can be particularly useful when your tensor transformations are
expressed as `tf.data.Dataset` operations, and you want to use those
transformations while serving your model.
#### Keras
```python
model = ... # A pre-built or custom model
class PreprocessingModel(tf.keras.Model):
def __init__(self, model):
super().__init__(self)
self.model = model
@tf.function(input_signature=[...])
def serving_fn(self, data):
ds = tf.data.Dataset.from_tensor_slices(data)
ds = ds.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
ds = ds.batch(batch_size=BATCH_SIZE)
return tf.argmax(self.model(ds.get_single_element()), axis=-1)
preprocessing_model = PreprocessingModel(model)
your_exported_model_dir = ... # save the model to this path.
tf.saved_model.save(preprocessing_model, your_exported_model_dir,
signatures={'serving_default': preprocessing_model.serving_fn}
)
```
Args:
name: (Optional.) A name for the tf.data operation.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
InvalidArgumentError: (at runtime) if `dataset` does not contain exactly
one element.
"""
metadata = dataset_metadata_pb2.Metadata()
if name:
metadata.name = _validate_and_encode(name)
return structure.from_compatible_tensor_list(
self.element_spec,
gen_dataset_ops.dataset_to_single_element(
self._variant_tensor,
metadata=metadata.SerializeToString(),
**self._flat_structure)) # pylint: disable=protected-access
def unbatch(self, name=None) -> "DatasetV2":
"""Splits elements of a dataset into multiple elements.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
>>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]
>>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)
>>> dataset = dataset.unbatch()
>>> [a.item() for a in dataset.as_numpy_iterator()]
[1, 2, 3, 1, 2, 1, 2, 3, 4]
Note: `unbatch` requires a data copy to slice up the batched tensor into
smaller, unbatched tensors. When optimizing performance, try to avoid
unnecessary usage of `unbatch`.
Args:
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (
# dataset_ops -> unbatch_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import unbatch_op
return unbatch_op._unbatch(self, name=name)
# pylint: enable=g-import-not-at-top,protected-access
  def with_options(self, options, name=None) -> "DatasetV2":
    """Returns a new `tf.data.Dataset` with the given options set.

    The options are "global" in the sense they apply to the entire dataset.
    If options are set multiple times, they are merged as long as different
    options do not use different non-default values.

    >>> ds = tf.data.Dataset.range(5)
    >>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),
    ...                    cycle_length=3,
    ...                    num_parallel_calls=3)
    >>> options = tf.data.Options()
    >>> # This will make the interleave order non-deterministic.
    >>> options.deterministic = False
    >>> ds = ds.with_options(options)

    Args:
      options: A `tf.data.Options` that identifies the options to use.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: when an option is set more than once to a non-default value
    """
    # Merging with previously-set options (and conflict detection) is handled
    # inside `_OptionsDataset`.
    return _OptionsDataset(self, options, name=name)
def cardinality(self):
"""Returns the cardinality of the dataset, if known.
`cardinality` may return `tf.data.INFINITE_CARDINALITY` if the dataset
contains an infinite number of elements or `tf.data.UNKNOWN_CARDINALITY` if
the analysis fails to determine the number of elements in the dataset
(e.g. when the dataset source is a file).
>>> dataset = tf.data.Dataset.range(42)
>>> print(dataset.cardinality().numpy())
42
>>> dataset = dataset.repeat()
>>> cardinality = dataset.cardinality()
>>> print((cardinality == tf.data.INFINITE_CARDINALITY).numpy())
True
>>> dataset = dataset.filter(lambda x: True)
>>> cardinality = dataset.cardinality()
>>> print((cardinality == tf.data.UNKNOWN_CARDINALITY).numpy())
True
Returns:
A scalar `tf.int64` `Tensor` representing the cardinality of the dataset.
If the cardinality is infinite or unknown, `cardinality` returns the
named constants `tf.data.INFINITE_CARDINALITY` and
`tf.data.UNKNOWN_CARDINALITY` respectively.
"""
return gen_dataset_ops.dataset_cardinality(self._variant_tensor)
  def group_by_window(
      self,
      key_func,
      reduce_func,
      window_size=None,
      window_size_func=None,
      name=None,
  ) -> "DatasetV2":
    """Groups windows of elements by key and reduces them.

    This transformation maps each consecutive element in a dataset to a key
    using `key_func` and groups the elements by key. It then applies
    `reduce_func` to at most `window_size_func(key)` elements matching the same
    key. All except the final window for each key will contain
    `window_size_func(key)` elements; the final window may be smaller.

    You may provide either a constant `window_size` or a window size determined
    by the key through `window_size_func`.

    >>> dataset = tf.data.Dataset.range(10)
    >>> window_size = 5
    >>> key_func = lambda x: x%2
    >>> reduce_func = lambda key, dataset: dataset.batch(window_size)
    >>> dataset = dataset.group_by_window(
    ...     key_func=key_func,
    ...     reduce_func=reduce_func,
    ...     window_size=window_size)
    >>> for elem in dataset.as_numpy_iterator():
    ...   print(elem)
    [0 2 4 6 8]
    [1 3 5 7 9]

    Args:
      key_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        scalar `tf.int64` tensor.
      reduce_func: A function mapping a key and a dataset of up to `window_size`
        consecutive elements matching that key to another dataset.
      window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements matching the same key to combine in a single batch,
        which will be passed to `reduce_func`. Mutually exclusive with
        `window_size_func`.
      window_size_func: A function mapping a key to a `tf.int64` scalar
        `tf.Tensor`, representing the number of consecutive elements matching
        the same key to combine in a single batch, which will be passed to
        `reduce_func`. Mutually exclusive with `window_size`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: if neither or both of {`window_size`, `window_size_func`} are
        passed.
    """
    # Loaded lazily due to a circular dependency (
    # dataset_ops -> group_by_window_op -> dataset_ops).
    # pylint: disable=g-import-not-at-top,protected-access
    from tensorflow.python.data.ops import group_by_window_op
    # The mutual-exclusion check for `window_size`/`window_size_func` (see
    # Raises above) is performed downstream in `_group_by_window`.
    return group_by_window_op._group_by_window(
        self, key_func, reduce_func, window_size, window_size_func, name=name)
    # pylint: enable=g-import-not-at-top,protected-access
  def bucket_by_sequence_length(
      self,
      element_length_func,
      bucket_boundaries,
      bucket_batch_sizes,
      padded_shapes=None,
      padding_values=None,
      pad_to_bucket_boundary=False,
      no_padding=False,
      drop_remainder=False,
      name=None,
  ) -> "DatasetV2":
    """A transformation that buckets elements in a `Dataset` by length.

    Elements of the `Dataset` are grouped together by length and then are padded
    and batched.

    This is useful for sequence tasks in which the elements have variable
    length. Grouping together elements that have similar lengths reduces the
    total fraction of padding in a batch which increases training step
    efficiency.

    Below is an example to bucketize the input data to the 3 buckets
    "[0, 3), [3, 5), [5, inf)" based on sequence length, with batch size 2.

    >>> elements = [
    ...     [0], [1, 2, 3, 4], [5, 6, 7],
    ...     [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
    >>> dataset = tf.data.Dataset.from_generator(
    ...     lambda: elements, tf.int64, output_shapes=[None])
    >>> dataset = dataset.bucket_by_sequence_length(
    ...     element_length_func=lambda elem: tf.shape(elem)[0],
    ...     bucket_boundaries=[3, 5],
    ...     bucket_batch_sizes=[2, 2, 2])
    >>> for elem in dataset.as_numpy_iterator():
    ...   print(elem)
    [[1 2 3 4]
     [5 6 7 0]]
    [[ 7  8  9 10 11  0]
     [13 14 15 16 19 20]]
    [[ 0  0]
     [21 22]]

    Args:
      element_length_func: function from element in `Dataset` to `tf.int32`,
        determines the length of the element, which will determine the bucket it
        goes into.
      bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
      bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
        `len(bucket_boundaries) + 1`.
      padded_shapes: Nested structure of `tf.TensorShape` to pass to
        `tf.data.Dataset.padded_batch`. If not provided, will use
        `dataset.output_shapes`, which will result in variable length dimensions
        being padded out to the maximum length in each batch.
      padding_values: Values to pad with, passed to
        `tf.data.Dataset.padded_batch`. Defaults to padding with 0.
      pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
        size to maximum length in batch. If `True`, will pad dimensions with
        unknown size to bucket boundary minus 1 (i.e., the maximum length in
        each bucket), and caller must ensure that the source `Dataset` does not
        contain any elements with length longer than `max(bucket_boundaries)`.
      no_padding: `bool`, indicates whether to pad the batch features (features
        need to be either of type `tf.sparse.SparseTensor` or of same shape).
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
    """
    if len(bucket_batch_sizes) != (len(bucket_boundaries) + 1):
      raise ValueError(
          f"`len(bucket_batch_sizes)` must equal `len(bucket_boundaries) + 1` "
          f"but `len(bucket_batch_sizes)={len(bucket_batch_sizes)}` and "
          f"`len(bucket_boundaries)={len(bucket_boundaries)}`.")

    batch_sizes = constant_op.constant(bucket_batch_sizes, dtype=dtypes.int64)

    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)

      # Buckets are the half-open intervals [min, b0), [b0, b1), ...,
      # [bn, max) formed from `bucket_boundaries` and int32 sentinels.
      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      # Exactly one interval can hold `seq_length`, so `reduce_min` over the
      # matching indices yields that bucket's id.
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

      return bucket_id

    def window_size_fn(bucket_id):
      # The window size is set to the batch size for this bucket
      window_size = batch_sizes[bucket_id]
      return window_size

    def make_padded_shapes(shapes, none_filler=None):
      # Replace unknown (None) dimensions with `none_filler` in every shape
      # of the (possibly nested) structure.
      padded = []
      for shape in nest.flatten(shapes):
        shape = tensor_shape.TensorShape(shape)
        shape = [
            none_filler if tensor_shape.dimension_value(d) is None else d
            for d in shape
        ]
        padded.append(shape)
      return nest.pack_sequence_as(shapes, padded)

    def batching_fn(bucket_id, grouped_dataset):
      """Batch elements in dataset."""
      batch_size = window_size_fn(bucket_id)
      if no_padding:
        return grouped_dataset.batch(
            batch_size, drop_remainder=drop_remainder, name=name)
      none_filler = None
      if pad_to_bucket_boundary:
        err_msg = ("When pad_to_bucket_boundary=True, elements must have "
                   "length < max(bucket_boundaries).")
        # The last (overflow) bucket has no upper boundary to pad to, so
        # elements must not land in it when padding to bucket boundaries.
        check = check_ops.assert_less(
            bucket_id,
            constant_op.constant(
                len(bucket_batch_sizes) - 1, dtype=dtypes.int64),
            message=err_msg)
        with ops.control_dependencies([check]):
          boundaries = constant_op.constant(
              bucket_boundaries, dtype=dtypes.int64)
          # Pad unknown dims to boundary - 1: the longest length that still
          # falls inside this half-open bucket interval.
          bucket_boundary = boundaries[bucket_id]
          none_filler = bucket_boundary - 1
      input_shapes = get_legacy_output_shapes(grouped_dataset)
      shapes = make_padded_shapes(
          padded_shapes or input_shapes, none_filler=none_filler)
      return grouped_dataset.padded_batch(
          batch_size,
          shapes,
          padding_values,
          drop_remainder=drop_remainder,
          name=name)

    return self.group_by_window(
        key_func=element_to_bucket_id,
        reduce_func=batching_fn,
        window_size_func=window_size_fn,
        name=name)
@staticmethod
def random(
seed=None, rerandomize_each_iteration=None, name=None
) -> "DatasetV2":
"""Creates a `Dataset` of pseudorandom values.
The dataset generates a sequence of uniformly distributed integer values.
`rerandomize_each_iteration` controls whether the sequence of random number
generated should be re-randomized for each epoch. The default value is False
where the dataset generates the same sequence of random numbers for each
epoch.
>>> ds1 = tf.data.Dataset.random(seed=4).take(10)
>>> ds2 = tf.data.Dataset.random(seed=4).take(10)
>>> print(list(ds1.as_numpy_iterator())==list(ds2.as_numpy_iterator()))
True
>>> ds3 = tf.data.Dataset.random(seed=4).take(10)
>>> ds3_first_epoch = list(ds3.as_numpy_iterator())
>>> ds3_second_epoch = list(ds3.as_numpy_iterator())
>>> print(ds3_first_epoch == ds3_second_epoch)
True
>>> ds4 = tf.data.Dataset.random(
... seed=4, rerandomize_each_iteration=True).take(10)
>>> ds4_first_epoch = list(ds4.as_numpy_iterator())
>>> ds4_second_epoch = list(ds4.as_numpy_iterator())
>>> print(ds4_first_epoch == ds4_second_epoch)
False
Args:
seed: (Optional) If specified, the dataset produces a deterministic
sequence of values.
rerandomize_each_iteration: (Optional) If set to False, the dataset
generates the same sequence of random numbers for each epoch. If set to
True, it generates a different deterministic sequence of random numbers
for each epoch. It is defaulted to False if left unspecified.
name: (Optional.) A name for the tf.data operation.
Returns:
Dataset: A `Dataset`.
"""
# Loaded lazily due to a circular dependency (
# dataset_ops -> random_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import random_op
return random_op._random(
seed=seed,
rerandomize_each_iteration=rerandomize_each_iteration,
name=name)
# pylint: enable=g-import-not-at-top,protected-access
def snapshot(
self,
path,
compression="AUTO",
reader_func=None,
shard_func=None,
name=None,
) -> "DatasetV2":
"""API to persist the output of the input dataset.
The snapshot API allows users to transparently persist the output of their
preprocessing pipeline to disk, and materialize the pre-processed data on a
different training run.
This API enables repeated preprocessing steps to be consolidated, and allows
re-use of already processed data, trading off disk storage and network
bandwidth for freeing up more valuable CPU resources and accelerator compute
time.
https://github.com/tensorflow/community/blob/master/rfcs/20200107-tf-data-snapshot.md
has detailed design documentation of this feature.
Users can specify various options to control the behavior of snapshot,
including how snapshots are read from and written to by passing in
user-defined functions to the `reader_func` and `shard_func` parameters.
`shard_func` is a user specified function that maps input elements to
snapshot shards.
Users may want to specify this function to control how snapshot files should
be written to disk. Below is an example of how a potential `shard_func`
could be written.
```python
dataset = ...
dataset = dataset.enumerate()
dataset = dataset.snapshot("/path/to/snapshot/dir",
shard_func=lambda x, y: x % NUM_SHARDS, ...)
dataset = dataset.map(lambda x, y: y)
```
`reader_func` is a user specified function that accepts a single argument:
(1) a Dataset of Datasets, each representing a "split" of elements of the
original dataset. The cardinality of the input dataset matches the
number of the shards specified in the `shard_func` (see above). The function
should return a Dataset of elements of the original dataset.
Users may want specify this function to control how snapshot files should be
read from disk, including the amount of shuffling and parallelism.
Here is an example of a standard reader function a user can define. This
function enables both dataset shuffling and parallel reading of datasets:
```python
def user_reader_func(datasets):
# shuffle the datasets splits
datasets = datasets.shuffle(NUM_CORES)
# read datasets in parallel and interleave their elements
return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)
dataset = dataset.snapshot("/path/to/snapshot/dir",
reader_func=user_reader_func)
```
By default, snapshot parallelizes reads by the number of cores available on
the system, but will not attempt to shuffle the data.
Args:
path: Required. A directory to use for storing / loading the snapshot to /
from.
compression: Optional. The type of compression to apply to the snapshot
written to disk. Supported options are `GZIP`, `SNAPPY`, `AUTO` or None.
Defaults to `AUTO`, which attempts to pick an appropriate compression
algorithm for the dataset.
reader_func: Optional. A function to control how to read data from
snapshot shards.
shard_func: Optional. A function to control how to shard data when writing
a snapshot.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (
# dataset_ops -> snapshot_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import snapshot_op
return snapshot_op._snapshot(
self, path, compression, reader_func, shard_func, name=name)
# pylint: enable=g-import-not-at-top,protected-access
def scan(self, initial_state, scan_func, name=None) -> "DatasetV2":
"""A transformation that scans a function across an input dataset.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
>>> dataset = tf.data.Dataset.range(10)
>>> initial_state = tf.constant(0, dtype=tf.int64)
>>> scan_func = lambda state, i: (state + i, state + i)
>>> dataset = dataset.scan(initial_state=initial_state, scan_func=scan_func)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45]
Args:
initial_state: A nested structure of tensors, representing the initial
state of the accumulator.
scan_func: A function that maps `(old_state, input_element)` to
`(new_state, output_element)`. It must take two arguments and return a
pair of nested structures of tensors. The `new_state` must match the
structure of `initial_state`.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops ->
# scan_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import scan_op
return scan_op._scan(self, initial_state, scan_func, name=name)
# pylint: enable=g-import-not-at-top,protected-access
def take_while(self, predicate, name=None) -> "DatasetV2":
"""A transformation that stops dataset iteration based on a `predicate`.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.take_while(lambda x: x < 5)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[0, 1, 2, 3, 4]
Args:
predicate: A function that maps a nested structure of tensors (having
shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.bool` tensor.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (
# dataset_ops -> take_while_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import take_while_op
return take_while_op._take_while(self, predicate, name=name)
# pylint: enable=g-import-not-at-top,protected-access
def unique(self, name=None) -> "DatasetV2":
"""A transformation that discards duplicate elements of a `Dataset`.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
>>> dataset = dataset.unique()
>>> sorted([a.item() for a in dataset.as_numpy_iterator()])
[1, 2, 37]
Note: This transformation only supports datasets which fit into memory
and have elements of either `tf.int32`, `tf.int64` or `tf.string` type.
Args:
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# Loaded lazily due to a circular dependency (dataset_ops -> unique_op ->
# dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import unique_op
return unique_op._unique(self, name)
# pylint: enable=g-import-not-at-top,protected-access
def rejection_resample(
self, class_func, target_dist, initial_dist=None, seed=None, name=None
) -> "DatasetV2":
"""Resamples elements to reach a target distribution.
Note: This implementation can reject **or repeat** elements in order to
reach the `target_dist`. So, in some cases, the output `Dataset` may be
larger than the input `Dataset`.
>>> initial_dist = [0.6, 0.4]
>>> n = 1000
>>> elems = np.random.choice(len(initial_dist), size=n, p=initial_dist)
>>> dataset = tf.data.Dataset.from_tensor_slices(elems)
>>> zero, one = np.bincount(list(dataset.as_numpy_iterator())) / n
Following from `initial_dist`, `zero` is ~0.6 and `one` is ~0.4.
>>> target_dist = [0.5, 0.5]
>>> dataset = dataset.rejection_resample(
... class_func=lambda x: x,
... target_dist=target_dist,
... initial_dist=initial_dist)
>>> dataset = dataset.map(lambda class_func_result, data: data)
>>> zero, one = np.bincount(list(dataset.as_numpy_iterator())) / n
Following from `target_dist`, `zero` is ~0.5 and `one` is ~0.5.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above.
"""
# TODO(b/245793127): Consider switching back to the 'v1' implementation.
target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist")
target_dist_t = math_ops.cast(target_dist_t, dtypes.float32)
# Get initial distribution.
if initial_dist is not None:
initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist")
initial_dist_t = math_ops.cast(initial_dist_t, dtypes.float32)
acceptance_dist, prob_of_original = (
_calculate_acceptance_probs_with_mixing(initial_dist_t,
target_dist_t))
initial_dist_ds = DatasetV2.from_tensors(
initial_dist_t, name=name).repeat(name=name)
acceptance_dist_ds = DatasetV2.from_tensors(
acceptance_dist, name=name).repeat(name=name)
prob_of_original_ds = DatasetV2.from_tensors(
prob_of_original, name=name).repeat(name=name)
else:
initial_dist_ds = _estimate_initial_dist_ds(
target_dist_t, self.map(class_func, name=name), name=name)
acceptance_and_original_prob_ds = initial_dist_ds.map(
lambda initial: _calculate_acceptance_probs_with_mixing( # pylint: disable=g-long-lambda
initial, target_dist_t),
name=name)
acceptance_dist_ds = acceptance_and_original_prob_ds.map(
lambda accept_prob, _: accept_prob, name=name)
prob_of_original_ds = acceptance_and_original_prob_ds.map(
lambda _, prob_original: prob_original, name=name)
filtered_ds = _filter_ds(self, acceptance_dist_ds, initial_dist_ds,
class_func, seed)
# Prefetch filtered dataset for speed.
filtered_ds = filtered_ds.prefetch(3, name=name)
prob_original_static = _get_prob_original_static(
initial_dist_t, target_dist_t) if initial_dist is not None else None
def add_class_value(*x):
if len(x) == 1:
return class_func(*x), x[0]
else:
return class_func(*x), x
if prob_original_static == 1:
return self.map(add_class_value, name=name)
elif prob_original_static == 0:
return filtered_ds
else:
return Dataset.sample_from_datasets(
[self.map(add_class_value), filtered_ds],
weights=prob_of_original_ds.map(lambda prob: [(prob, 1.0 - prob)]),
seed=seed,
stop_on_empty_dataset=True)
@staticmethod
def sample_from_datasets(
datasets,
weights=None,
seed=None,
stop_on_empty_dataset=False,
rerandomize_each_iteration=None,
) -> "DatasetV2":
"""Samples elements at random from the datasets in `datasets`.
Creates a dataset by interleaving elements of `datasets` with `weight[i]`
probability of picking an element from dataset `i`. Sampling is done without
replacement. For example, suppose we have 2 datasets:
```python
dataset1 = tf.data.Dataset.range(0, 3)
dataset2 = tf.data.Dataset.range(100, 103)
```
Suppose that we sample from these 2 datasets with the following weights:
```python
sample_dataset = tf.data.Dataset.sample_from_datasets(
[dataset1, dataset2], weights=[0.5, 0.5])
```
One possible outcome of elements in sample_dataset is:
```
print(list(sample_dataset.as_numpy_iterator()))
# [100, 0, 1, 101, 2, 102]
```
Args:
datasets: A non-empty list of `tf.data.Dataset` objects with compatible
structure.
weights: (Optional.) A list or Tensor of `len(datasets)` floating-point
values where `weights[i]` represents the probability to sample from
`datasets[i]`, or a `tf.data.Dataset` object where each element is such
a list. Defaults to a uniform distribution across `datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.random.set_seed` for behavior.
stop_on_empty_dataset: If `True`, sampling stops if it encounters an empty
dataset. If `False`, it continues sampling and skips any empty datasets.
It is recommended to set it to `True`. Otherwise, the distribution of
samples starts off as the user intends, but may change as input datasets
become empty. This can be difficult to detect since the dataset starts
off looking correct. Default to `False` for backward compatibility.
rerandomize_each_iteration: An optional `bool`. The boolean argument
controls whether the sequence of random numbers used to determine which
dataset to sample from will be rerandomized each epoch. That is, it
determinies whether datasets will be sampled in the same order across
different epochs (the default behavior) or not.
Returns:
A dataset that interleaves elements from `datasets` at random, according
to `weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError:
- If `datasets` is empty, or
- If `weights` is specified and does not match the length of `datasets`.
"""
# Loaded lazily due to a circular dependency
# (dataset_ops -> sample_from_datasets_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import sample_from_datasets_op
return sample_from_datasets_op._sample_from_datasets( # pylint: disable=protected-access
datasets,
weights,
seed,
stop_on_empty_dataset,
rerandomize_each_iteration,
)
# pylint: enable=g-import-not-at-top,protected-access
@staticmethod
def choose_from_datasets(
datasets, choice_dataset, stop_on_empty_dataset=True
) -> "DatasetV2":
"""Creates a dataset that deterministically chooses elements from `datasets`.
For example, given the following datasets:
```python
datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
tf.data.Dataset.from_tensors("bar").repeat(),
tf.data.Dataset.from_tensors("baz").repeat()]
# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
choice_dataset = tf.data.Dataset.range(3).repeat(3)
result = tf.data.Dataset.choose_from_datasets(datasets, choice_dataset)
```
The elements of `result` will be:
```
"foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
```
Args:
datasets: A non-empty list of `tf.data.Dataset` objects with compatible
structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
stop_on_empty_dataset: If `True`, selection stops if it encounters an
empty dataset. If `False`, it skips empty datasets. It is recommended to
set it to `True`. Otherwise, the selected elements start off as the user
intends, but may change as input datasets become empty. This can be
difficult to detect since the dataset starts off looking correct.
Defaults to `True`.
Returns:
A new `Dataset` with the transformation applied as described above.
Raises:
TypeError: If `datasets` or `choice_dataset` has the wrong type.
ValueError: If `datasets` is empty.
"""
# Loaded lazily due to a circular dependency
# (dataset_ops -> choose_from_datasets_op -> dataset_ops).
# pylint: disable=g-import-not-at-top,protected-access
from tensorflow.python.data.ops import choose_from_datasets_op
return choose_from_datasets_op._choose_from_datasets(
datasets, choice_dataset, stop_on_empty_dataset)
# pylint: enable=g-import-not-at-top,protected-access
@tf_export(v1=["data.Dataset"])
| DatasetV2 |
python | great-expectations__great_expectations | great_expectations/expectations/expectation.py | {
"start": 97348,
"end": 109159
} | class ____(BatchExpectation, ABC):
"""Base class for ColumnPairMapExpectations.
ColumnPairMapExpectations are evaluated for a pair of columns and ask a yes/no question about the row-wise
relationship between those two columns. Based on the result, they then calculate the percentage of rows
that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
ColumnPairMapExpectations must implement a `_validate(...)` method containing logic
for determining whether the Expectation is successfully validated.
Raises:
InvalidExpectationConfigurationError: If `column_A` and `column_B` parameters are missing from the configuration.
Args:
domain_keys (tuple): A tuple of the keys used to determine the domain of the
expectation.
success_keys (tuple): A tuple of the keys used to determine the success of
the expectation.
""" # noqa: E501 # FIXME CoP
column_A: StrictStr = Field(min_length=1, description=COLUMN_A_DESCRIPTION)
column_B: StrictStr = Field(min_length=1, description=COLUMN_B_DESCRIPTION)
mostly: MostlyField = 1
row_condition: RowConditionType = None
condition_parser: Union[ConditionParser, None] = None
catch_exceptions: bool = True
map_metric: ClassVar[Optional[str]] = None
domain_keys = (
"batch_id",
"column_A",
"column_B",
"row_condition",
"condition_parser",
)
domain_type: ClassVar[MetricDomainTypes] = MetricDomainTypes.COLUMN_PAIR
success_keys: ClassVar[Tuple[str, ...]] = ("mostly",)
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ColumnPairMapExpectation]) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"domain_type": {
"title": "Domain Type",
"type": "string",
"const": model.domain_type,
"description": "Column Pair Map",
}
}
)
@classmethod
@override
def is_abstract(cls) -> bool:
return cls.map_metric is None or super().is_abstract()
@override
def get_validation_dependencies(
self,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) -> ValidationDependencies:
validation_dependencies: ValidationDependencies = super().get_validation_dependencies(
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
assert isinstance(self.map_metric, str), (
"ColumnPairMapExpectation must override get_validation_dependencies "
"or declare exactly one map_metric"
)
assert self.metric_dependencies == tuple(), (
"ColumnPairMapExpectation must be configured using map_metric, "
"and cannot have metric_dependencies declared."
)
metric_kwargs: dict
configuration = self.configuration
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
metric_kwargs = get_metric_kwargs(
metric_name="table.row_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name="table.row_count",
metric_configuration=MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
result_format_str: Optional[str] = validation_dependencies.result_format.get(
"result_format"
)
include_unexpected_rows: Optional[bool] = validation_dependencies.result_format.get(
"include_unexpected_rows"
)
if result_format_str == ResultFormat.BOOLEAN_ONLY:
return validation_dependencies
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
if include_unexpected_rows:
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
if result_format_str == ResultFormat.BASIC:
return validation_dependencies
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
return validation_dependencies
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
result_format: Union[Dict[str, Union[int, str, bool, List[str], None]], str] = (
self._get_result_format(runtime_configuration=runtime_configuration)
)
include_unexpected_rows: bool
unexpected_index_column_names = None
if isinstance(result_format, dict):
include_unexpected_rows = bool(result_format.get("include_unexpected_rows", False))
unexpected_index_column_names = result_format.get("unexpected_index_column_names", None)
else:
include_unexpected_rows = False
total_count: Optional[int] = metrics.get("table.row_count")
unexpected_count: Optional[int] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
)
unexpected_values: Optional[Any] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}"
)
unexpected_index_list: Optional[List[int]] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}"
)
unexpected_index_query: Optional[str] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}"
)
filtered_row_count: Optional[int] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}"
)
unexpected_rows = None
if include_unexpected_rows:
unexpected_rows = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}"
)
if (
total_count is None
or unexpected_count is None
or filtered_row_count is None
or total_count == 0
or filtered_row_count == 0
):
# Vacuously true
success = True
else:
success = _mostly_success(
filtered_row_count,
unexpected_count,
self._get_success_kwargs()["mostly"],
)
return _format_map_output(
result_format=parse_result_format(result_format),
success=success,
element_count=total_count,
nonnull_count=filtered_row_count,
unexpected_count=unexpected_count,
unexpected_list=unexpected_values,
unexpected_index_list=unexpected_index_list,
unexpected_index_query=unexpected_index_query,
unexpected_index_column_names=unexpected_index_column_names,
unexpected_rows=unexpected_rows,
)
| ColumnPairMapExpectation |
python | coleifer__peewee | peewee.py | {
"start": 160013,
"end": 160061
} | class ____(Field):
field_type = 'ANY'
| AnyField |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0128_addons_notifications.py | {
"start": 150,
"end": 1809
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0127_default_to_semver"),
]
operations = [
migrations.AddField(
model_name="addonsconfig",
name="notifications_enabled",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="addonsconfig",
name="notifications_show_on_external",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="addonsconfig",
name="notifications_show_on_latest",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="addonsconfig",
name="notifications_show_on_non_stable",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="notifications_enabled",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="notifications_show_on_external",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="notifications_show_on_latest",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="notifications_show_on_non_stable",
field=models.BooleanField(default=True),
),
]
| Migration |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 175234,
"end": 178113
} | class ____(CIntLike, CType, EnumMixin):
# name string
# doc string or None
# cname string or None
# typedef_flag boolean
# values [string], populated during declaration analysis
is_enum = 1
signed = 1
rank = -1 # Ranks below any integer type
def __init__(self, name, cname, typedef_flag, namespace=None, doc=None):
self.name = name
self.doc = doc
self.cname = cname
self.values = []
self.typedef_flag = typedef_flag
self.namespace = namespace
self.default_value = "(%s) 0" % self.empty_declaration_code()
def __str__(self):
return self.name
def __repr__(self):
return "<CEnumType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.namespace:
base_code = "%s::%s" % (
self.namespace.empty_declaration_code(), self.cname)
elif self.typedef_flag:
base_code = self.cname
else:
base_code = "enum %s" % self.cname
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def specialize(self, values):
if self.namespace:
namespace = self.namespace.specialize(values)
if namespace != self.namespace:
return CEnumType(
self.name, self.cname, self.typedef_flag, namespace)
return self
def create_type_wrapper(self, env):
from .UtilityCode import CythonUtilityCode
# Generate "int"-like conversion function
old_to_py_function = self.to_py_function
self.to_py_function = None
CIntLike.create_to_py_utility_code(self, env)
enum_to_pyint_func = self.to_py_function
self.to_py_function = old_to_py_function # we don't actually want to overwrite this
env.use_utility_code(CythonUtilityCode.load(
"EnumType", "CpdefEnums.pyx",
context={"name": self.name,
"items": tuple(self.values),
"enum_doc": self.doc,
"enum_to_pyint_func": enum_to_pyint_func,
"static_modname": env.qualified_name,
},
outer_module_scope=env.global_scope()))
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return self.to_py_function
if not self.entry.create_wrapper:
return super().create_to_py_utility_code(env)
self.create_enum_to_py_utility_code(env)
return True
| CEnumType |
python | mlflow__mlflow | mlflow/models/rag_signatures.py | {
"start": 1383,
"end": 1738
} | class ____:
index: int = 0
delta: Message = field(
default_factory=lambda: Message(
role="assistant",
content="MLflow is an open source platform for the machine learning lifecycle.",
)
)
finish_reason: str = "stop"
@deprecated("mlflow.types.llm.ChatCompletionResponse")
@dataclass
| ChainCompletionChunk |
python | google__python-fire | fire/core_test.py | {
"start": 750,
"end": 9613
} | class ____(testutils.BaseTestCase):
def testOneLineResult(self):
self.assertEqual(core._OneLineResult(1), '1') # pylint: disable=protected-access
self.assertEqual(core._OneLineResult('hello'), 'hello') # pylint: disable=protected-access
self.assertEqual(core._OneLineResult({}), '{}') # pylint: disable=protected-access
self.assertEqual(core._OneLineResult({'x': 'y'}), '{"x": "y"}') # pylint: disable=protected-access
def testOneLineResultCircularRef(self):
circular_reference = tc.CircularReference()
self.assertEqual(core._OneLineResult(circular_reference.create()), # pylint: disable=protected-access
"{'y': {...}}")
@mock.patch('fire.interact.Embed')
def testInteractiveMode(self, mock_embed):
core.Fire(tc.TypedProperties, command=['alpha'])
self.assertFalse(mock_embed.called)
core.Fire(tc.TypedProperties, command=['alpha', '--', '-i'])
self.assertTrue(mock_embed.called)
@mock.patch('fire.interact.Embed')
def testInteractiveModeFullArgument(self, mock_embed):
core.Fire(tc.TypedProperties, command=['alpha', '--', '--interactive'])
self.assertTrue(mock_embed.called)
@mock.patch('fire.interact.Embed')
def testInteractiveModeVariables(self, mock_embed):
core.Fire(tc.WithDefaults, command=['double', '2', '--', '-i'])
self.assertTrue(mock_embed.called)
(variables, verbose), unused_kwargs = mock_embed.call_args
self.assertFalse(verbose)
self.assertEqual(variables['result'], 4)
self.assertIsInstance(variables['self'], tc.WithDefaults)
self.assertIsInstance(variables['trace'], trace.FireTrace)
@mock.patch('fire.interact.Embed')
def testInteractiveModeVariablesWithName(self, mock_embed):
core.Fire(tc.WithDefaults,
command=['double', '2', '--', '-i', '-v'], name='D')
self.assertTrue(mock_embed.called)
(variables, verbose), unused_kwargs = mock_embed.call_args
self.assertTrue(verbose)
self.assertEqual(variables['result'], 4)
self.assertIsInstance(variables['self'], tc.WithDefaults)
self.assertEqual(variables['D'], tc.WithDefaults)
self.assertIsInstance(variables['trace'], trace.FireTrace)
# TODO(dbieber): Use parameterized tests to break up repetitive tests.
def testHelpWithClass(self):
with self.assertRaisesFireExit(0, 'SYNOPSIS.*ARG1'):
core.Fire(tc.InstanceVars, command=['--', '--help'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*ARG1'):
core.Fire(tc.InstanceVars, command=['--help'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*ARG1'):
core.Fire(tc.InstanceVars, command=['-h'])
def testHelpWithMember(self):
with self.assertRaisesFireExit(0, 'SYNOPSIS.*capitalize'):
core.Fire(tc.TypedProperties, command=['gamma', '--', '--help'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*capitalize'):
core.Fire(tc.TypedProperties, command=['gamma', '--help'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*capitalize'):
core.Fire(tc.TypedProperties, command=['gamma', '-h'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*delta'):
core.Fire(tc.TypedProperties, command=['delta', '--help'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*echo'):
core.Fire(tc.TypedProperties, command=['echo', '--help'])
def testHelpOnErrorInConstructor(self):
with self.assertRaisesFireExit(0, 'SYNOPSIS.*VALUE'):
core.Fire(tc.ErrorInConstructor, command=['--', '--help'])
with self.assertRaisesFireExit(0, 'INFO:.*SYNOPSIS.*VALUE'):
core.Fire(tc.ErrorInConstructor, command=['--help'])
def testHelpWithNamespaceCollision(self):
# Tests cases when calling the help shortcut should not show help.
with self.assertOutputMatches(stdout='DESCRIPTION.*', stderr=None):
core.Fire(tc.WithHelpArg, command=['--help', 'False'])
with self.assertOutputMatches(stdout='help in a dict', stderr=None):
core.Fire(tc.WithHelpArg, command=['dictionary', '__help'])
with self.assertOutputMatches(stdout='{}', stderr=None):
core.Fire(tc.WithHelpArg, command=['dictionary', '--help'])
with self.assertOutputMatches(stdout='False', stderr=None):
core.Fire(tc.function_with_help, command=['False'])
def testInvalidParameterRaisesFireExit(self):
with self.assertRaisesFireExit(2, 'runmisspelled'):
core.Fire(tc.Kwargs, command=['props', '--a=1', '--b=2', 'runmisspelled'])
def testErrorRaising(self):
# Errors in user code should not be caught; they should surface as normal.
# This will lead to exit status code 1 for the client program.
with self.assertRaises(ValueError):
core.Fire(tc.ErrorRaiser, command=['fail'])
def testFireError(self):
error = core.FireError('Example error')
self.assertIsNotNone(error)
def testFireErrorMultipleValues(self):
error = core.FireError('Example error', 'value')
self.assertIsNotNone(error)
def testPrintEmptyDict(self):
with self.assertOutputMatches(stdout='{}', stderr=None):
core.Fire(tc.EmptyDictOutput, command=['totally_empty'])
with self.assertOutputMatches(stdout='{}', stderr=None):
core.Fire(tc.EmptyDictOutput, command=['nothing_printable'])
def testPrintOrderedDict(self):
with self.assertOutputMatches(stdout=r'A:\s+A\s+2:\s+2\s+', stderr=None):
core.Fire(tc.OrderedDictionary, command=['non_empty'])
with self.assertOutputMatches(stdout='{}'):
core.Fire(tc.OrderedDictionary, command=['empty'])
def testPrintNamedTupleField(self):
with self.assertOutputMatches(stdout='11', stderr=None):
core.Fire(tc.NamedTuple, command=['point', 'x'])
def testPrintNamedTupleFieldNameEqualsValue(self):
with self.assertOutputMatches(stdout='x', stderr=None):
core.Fire(tc.NamedTuple, command=['matching_names', 'x'])
def testPrintNamedTupleIndex(self):
with self.assertOutputMatches(stdout='22', stderr=None):
core.Fire(tc.NamedTuple, command=['point', '1'])
def testPrintSet(self):
with self.assertOutputMatches(stdout='.*three.*', stderr=None):
core.Fire(tc.simple_set(), command=[])
def testPrintFrozenSet(self):
with self.assertOutputMatches(stdout='.*three.*', stderr=None):
core.Fire(tc.simple_frozenset(), command=[])
def testPrintNamedTupleNegativeIndex(self):
with self.assertOutputMatches(stdout='11', stderr=None):
core.Fire(tc.NamedTuple, command=['point', '-2'])
def testCallable(self):
with self.assertOutputMatches(stdout=r'foo:\s+foo\s+', stderr=None):
core.Fire(tc.CallableWithKeywordArgument(), command=['--foo=foo'])
with self.assertOutputMatches(stdout=r'foo\s+', stderr=None):
core.Fire(tc.CallableWithKeywordArgument(), command=['print_msg', 'foo'])
with self.assertOutputMatches(stdout=r'', stderr=None):
core.Fire(tc.CallableWithKeywordArgument(), command=[])
def testCallableWithPositionalArgs(self):
with self.assertRaisesFireExit(2, ''):
# This does not give 7 since positional args are disallowed for callable
# objects.
core.Fire(tc.CallableWithPositionalArgs(), command=['3', '4'])
def testStaticMethod(self):
self.assertEqual(
core.Fire(tc.HasStaticAndClassMethods,
command=['static_fn', 'alpha']),
'alpha',
)
def testClassMethod(self):
self.assertEqual(
core.Fire(tc.HasStaticAndClassMethods,
command=['class_fn', '6']),
7,
)
def testCustomSerialize(self):
def serialize(x):
if isinstance(x, list):
return ', '.join(str(xi) for xi in x)
if isinstance(x, dict):
return ', '.join('{}={!r}'.format(k, v) for k, v in sorted(x.items()))
if x == 'special':
return ['SURPRISE!!', "I'm a list!"]
return x
ident = lambda x: x
with self.assertOutputMatches(stdout='a, b', stderr=None):
_ = core.Fire(ident, command=['[a,b]'], serialize=serialize)
with self.assertOutputMatches(stdout='a=5, b=6', stderr=None):
_ = core.Fire(ident, command=['{a:5,b:6}'], serialize=serialize)
with self.assertOutputMatches(stdout='asdf', stderr=None):
_ = core.Fire(ident, command=['asdf'], serialize=serialize)
with self.assertOutputMatches(
stdout="SURPRISE!!\nI'm a list!\n", stderr=None):
_ = core.Fire(ident, command=['special'], serialize=serialize)
with self.assertRaises(core.FireError):
core.Fire(ident, command=['asdf'], serialize=55)
def testLruCacheDecoratorBoundArg(self):
self.assertEqual(
core.Fire(tc.py3.LruCacheDecoratedMethod,
command=['lru_cache_in_class', 'foo']), 'foo')
def testLruCacheDecorator(self):
self.assertEqual(
core.Fire(tc.py3.lru_cache_decorated,
command=['foo']), 'foo')
if __name__ == '__main__':
testutils.main()
| CoreTest |
python | tornadoweb__tornado | tornado/test/template_test.py | {
"start": 10689,
"end": 18213
} | class ____(unittest.TestCase):
def setUp(self):
self.templates = {
"escaped.html": "{% autoescape xhtml_escape %}{{ name }}",
"unescaped.html": "{% autoescape None %}{{ name }}",
"default.html": "{{ name }}",
"include.html": """\
escaped: {% include 'escaped.html' %}
unescaped: {% include 'unescaped.html' %}
default: {% include 'default.html' %}
""",
"escaped_block.html": """\
{% autoescape xhtml_escape %}\
{% block name %}base: {{ name }}{% end %}""",
"unescaped_block.html": """\
{% autoescape None %}\
{% block name %}base: {{ name }}{% end %}""",
# Extend a base template with different autoescape policy,
# with and without overriding the base's blocks
"escaped_extends_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}""",
"escaped_overrides_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"unescaped_extends_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}""",
"unescaped_overrides_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"raw_expression.html": """\
{% autoescape xhtml_escape %}\
expr: {{ name }}
raw: {% raw name %}""",
}
def test_default_off(self):
loader = DictLoader(self.templates, autoescape=None)
name = "Bobby <table>s"
self.assertEqual(
loader.load("escaped.html").generate(name=name), b"Bobby <table>s"
)
self.assertEqual(
loader.load("unescaped.html").generate(name=name), b"Bobby <table>s"
)
self.assertEqual(
loader.load("default.html").generate(name=name), b"Bobby <table>s"
)
self.assertEqual(
loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n",
)
def test_default_on(self):
loader = DictLoader(self.templates, autoescape="xhtml_escape")
name = "Bobby <table>s"
self.assertEqual(
loader.load("escaped.html").generate(name=name), b"Bobby <table>s"
)
self.assertEqual(
loader.load("unescaped.html").generate(name=name), b"Bobby <table>s"
)
self.assertEqual(
loader.load("default.html").generate(name=name), b"Bobby <table>s"
)
self.assertEqual(
loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n",
)
def test_unextended_block(self):
loader = DictLoader(self.templates)
name = "<script>"
self.assertEqual(
loader.load("escaped_block.html").generate(name=name),
b"base: <script>",
)
self.assertEqual(
loader.load("unescaped_block.html").generate(name=name), b"base: <script>"
)
def test_extended_block(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name="<script>")
self.assertEqual(render("escaped_extends_unescaped.html"), b"base: <script>")
self.assertEqual(
render("escaped_overrides_unescaped.html"), b"extended: <script>"
)
self.assertEqual(
render("unescaped_extends_escaped.html"), b"base: <script>"
)
self.assertEqual(
render("unescaped_overrides_escaped.html"), b"extended: <script>"
)
def test_raw_expression(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name='<>&"')
self.assertEqual(
render("raw_expression.html"), b"expr: <>&"\n" b'raw: <>&"'
)
def test_custom_escape(self):
loader = DictLoader({"foo.py": "{% autoescape py_escape %}s = {{ name }}\n"})
def py_escape(s):
self.assertEqual(type(s), bytes)
return repr(native_str(s))
def render(template, name):
return loader.load(template).generate(py_escape=py_escape, name=name)
self.assertEqual(render("foo.py", "<html>"), b"s = '<html>'\n")
self.assertEqual(render("foo.py", "';sys.exit()"), b"""s = "';sys.exit()"\n""")
self.assertEqual(
render("foo.py", ["not a string"]), b"""s = "['not a string']"\n"""
)
def test_manual_minimize_whitespace(self):
# Whitespace including newlines is allowed within template tags
# and directives, and this is one way to avoid long lines while
# keeping extra whitespace out of the rendered output.
loader = DictLoader(
{
"foo.txt": """\
{% for i in items
%}{% if i > 0 %}, {% end %}{#
#}{{i
}}{% end
%}"""
}
)
self.assertEqual(
loader.load("foo.txt").generate(items=range(5)), b"0, 1, 2, 3, 4"
)
def test_whitespace_by_filename(self):
# Default whitespace handling depends on the template filename.
loader = DictLoader(
{
"foo.html": " \n\t\n asdf\t ",
"bar.js": " \n\n\n\t qwer ",
"baz.txt": "\t zxcv\n\n",
"include.html": " {% include baz.txt %} \n ",
"include.txt": "\t\t{% include foo.html %} ",
}
)
# HTML and JS files have whitespace compressed by default.
self.assertEqual(loader.load("foo.html").generate(), b"\nasdf ")
self.assertEqual(loader.load("bar.js").generate(), b"\nqwer ")
# TXT files do not.
self.assertEqual(loader.load("baz.txt").generate(), b"\t zxcv\n\n")
# Each file maintains its own status even when included in
# a file of the other type.
self.assertEqual(loader.load("include.html").generate(), b" \t zxcv\n\n\n")
self.assertEqual(loader.load("include.txt").generate(), b"\t\t\nasdf ")
def test_whitespace_by_loader(self):
templates = {"foo.html": "\t\tfoo\n\n", "bar.txt": "\t\tbar\n\n"}
loader = DictLoader(templates, whitespace="all")
self.assertEqual(loader.load("foo.html").generate(), b"\t\tfoo\n\n")
self.assertEqual(loader.load("bar.txt").generate(), b"\t\tbar\n\n")
loader = DictLoader(templates, whitespace="single")
self.assertEqual(loader.load("foo.html").generate(), b" foo\n")
self.assertEqual(loader.load("bar.txt").generate(), b" bar\n")
loader = DictLoader(templates, whitespace="oneline")
self.assertEqual(loader.load("foo.html").generate(), b" foo ")
self.assertEqual(loader.load("bar.txt").generate(), b" bar ")
def test_whitespace_directive(self):
loader = DictLoader(
{
"foo.html": """\
{% whitespace oneline %}
{% for i in range(3) %}
{{ i }}
{% end %}
{% whitespace all %}
pre\tformatted
"""
}
)
self.assertEqual(
loader.load("foo.html").generate(), b" 0 1 2 \n pre\tformatted\n"
)
| AutoEscapeTest |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 6392,
"end": 7687
} | class ____:
def test_run_task_with_client_provided_uuid(
self, sync_prefect_client, events_pipeline
):
@task
def foo():
return 42
task_run_id = uuid4()
run_task_sync(foo, task_run_id=task_run_id)
events_pipeline.process_events(_sync=True)
task_run = sync_prefect_client.read_task_run(task_run_id)
assert task_run.id == task_run_id
async def test_with_provided_context(self, prefect_client):
@flow
def f():
pass
test_task_runner = ThreadPoolTaskRunner()
flow_run = await prefect_client.create_flow_run(f)
await propose_state(prefect_client, Running(), flow_run_id=flow_run.id)
result_store = await ResultStore().update_for_flow(f)
flow_run_context = EngineContext(
flow=f,
flow_run=flow_run,
client=prefect_client,
task_runner=test_task_runner,
result_store=result_store,
parameters={"x": "y"},
)
@task
def foo():
return FlowRunContext.get().flow_run.id
context = {"flow_run_context": flow_run_context.serialize()}
result = run_task_sync(foo, context=context)
assert result == flow_run.id
| TestRunTask |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 31522,
"end": 33614
} | class ____(Transform):
"""
Transform from unconstrained space to the simplex of one additional
dimension via a stick-breaking process.
This transform arises as an iterated sigmoid transform in a stick-breaking
construction of the `Dirichlet` distribution: the first logit is
transformed via sigmoid to the first probability and the probability of
everything else, and then the process recurses.
This is bijective and appropriate for use in HMC; however it mixes
coordinates together and is less appropriate for optimization.
"""
domain = constraints.real_vector
codomain = constraints.simplex
bijective = True
def __eq__(self, other):
return isinstance(other, StickBreakingTransform)
def _call(self, x):
offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
z = _clipped_sigmoid(x - offset.log())
z_cumprod = (1 - z).cumprod(-1)
y = pad(z, [0, 1], value=1) * pad(z_cumprod, [1, 0], value=1)
return y
def _inverse(self, y):
y_crop = y[..., :-1]
offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1)
sf = 1 - y_crop.cumsum(-1)
# we clamp to make sure that sf is positive which sometimes does not
# happen when y[-1] ~ 0 or y[:-1].sum() ~ 1
sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny)
x = y_crop.log() - sf.log() + offset.log()
return x
def log_abs_det_jacobian(self, x, y):
offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
x = x - offset.log()
# use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x)
detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1)
return detJ
def forward_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape[:-1] + (shape[-1] + 1,)
def inverse_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape[:-1] + (shape[-1] - 1,)
| StickBreakingTransform |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/final3.py | {
"start": 2103,
"end": 2910
} | class ____(ClassA):
# This should generate an error because we are overriding
# a member that is marked Final in the parent class.
member1 = 5
# This should generate an error because we are overriding
# a member that is marked Final in the parent class.
_member7: Final = 6
# This should not generate an error because it's private.
__member8: Final = 6
def __init__(self):
# This should generate an error because we are overriding
# a member that is marked Final in the parent class.
self.member6 = 5
# This should generate an error because Final isn't allowed for
# function parameters.
def func1(a: Final[int]):
pass
# This should generate an error because Final must the outermost
# type in assignments.
b: list[Final[int]] = []
| ClassB |
python | tensorflow__tensorflow | tensorflow/python/types/internal.py | {
"start": 1486,
"end": 1646
} | class ____(object):
"""Interface for internal isinstance checks to ops/ragged/ragged_tensor.py.
This helps to avoid circular dependencies.
"""
| RaggedTensor |
python | pytorch__pytorch | test/ao/sparsity/test_kernels.py | {
"start": 694,
"end": 9178
} | class ____(TestCase):
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
@override_qengines
def test_sparse_qlinear(self):
batch_size = 12
input_channels = 16
output_channels = 4
decimal_val = 4
row_block_size = 1
col_block_size = 4
# X86 implementation of sparse ops in qnnpack only support
# block pattern 1x4.
# arm kernels have support for both 1x4 and 8x1.
# This distinction is only because x86 implementations exist
# only to enable testing of integration path.
# We do plan to add 8x1 as well so that testing does not have to
# special case like this. At the moment it is deprioritized due
# to other higher priority works.
if qengine_is_qnnpack() and not (row_block_size == 1 and col_block_size == 4):
return
# ONEDNN and X86 do not support this yet
if qengine_is_onednn() or qengine_is_x86():
return
dense_prepack = torch.ops.quantized.linear_prepack
dense_qlinear = torch.ops.quantized.linear
dense_qlinear_dynamic = torch.ops.quantized.linear_dynamic
sparse_prepack = torch.ops.sparse.qlinear_prepack
sparse_qlinear = torch.ops.sparse.qlinear
sparse_qlinear_dynamic = torch.ops.sparse.qlinear_dynamic
X_scale = 0.2
X_zp = 2
X_fp32 = torch.randn(batch_size, input_channels, dtype=torch.float32)
float_bias = torch.randn(output_channels, dtype=torch.float32)
W_scales = torch.rand(output_channels, dtype=torch.float32)
W_zps = torch.zeros(output_channels, dtype=torch.int32)
W_fp32 = torch.randn(output_channels, input_channels, dtype=torch.float32)
with override_cpu_allocator_for_qnnpack(qengine_is_qnnpack()):
X_q = torch.quantize_per_tensor(
X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8
)
for use_channelwise, dynamic_mode in product([True, False], [True, False]):
if qengine_is_fbgemm() and dynamic_mode:
logger.info("dynamic sparse qlinear is only available in qnnpack")
continue
if qengine_is_qnnpack() and not dynamic_mode:
logger.info("static sparse qlinear is only available in fbgemm")
continue
if use_channelwise:
W_q = torch.quantize_per_channel(
W_fp32,
scales=W_scales,
zero_points=W_zps,
axis=0,
dtype=torch.qint8,
)
else:
W_q = torch.quantize_per_tensor(
W_fp32,
scale=W_scales[0],
zero_point=W_zps[0],
dtype=torch.qint8,
)
Y_scale = 1.1234
Y_zp = 5
W_prepack_dense = dense_prepack(W_q, float_bias)
W_prepack_sparse = sparse_prepack(
W_q, float_bias, row_block_size, col_block_size
)
if dynamic_mode:
Y = sparse_qlinear_dynamic(X_fp32, W_prepack_sparse)
Y_ref = dense_qlinear_dynamic(X_fp32, W_prepack_dense)
np.testing.assert_array_almost_equal(
Y_ref.numpy(), Y.numpy(), decimal=decimal_val
)
else:
Y_q = sparse_qlinear(X_q, W_prepack_sparse, Y_scale, Y_zp)
Y_q_ref = dense_qlinear(X_q, W_prepack_dense, Y_scale, Y_zp)
np.testing.assert_array_almost_equal(
Y_q_ref.int_repr().numpy(),
Y_q.int_repr().numpy(),
decimal=decimal_val,
)
def _sparse_layer_test_helper(
model_class,
sparse_mapping,
ref_mapping,
qconfig_dict,
fqn_to_check,
test_class,
test_scripting,
):
# SET UP TEST PARAMETERS, INPUTS AND WEIGHTS
# ------------------------------------------
batch_size = 12
input_channels = 4
output_channels = 7
model = model_class(input_channels, output_channels)
# For sparse kernels both the activation and weight ZP = 0
X_scale = 0.2
X_zp = 2
W_scale = 1e-2
W_zp = 0
X_fp32 = torch.randn(batch_size, input_channels, dtype=torch.float32)
# generate a weight which we'll insert into the model
W_fp32 = torch.randn(output_channels, input_channels, dtype=torch.float32)
mask = torch.randint(0, 2, W_fp32.shape)
W_fp32 *= mask
with override_cpu_allocator_for_qnnpack(qengine_is_qnnpack()):
X_q = torch.quantize_per_tensor(
X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8
)
X_fp32 = X_q.dequantize()
W_q = torch.quantize_per_tensor(W_fp32, W_scale, W_zp, torch.qint8)
# PREPARE MODELS FOR QUANTIZATION
# -------------------------------
model.linear.weight = nn.Parameter(W_q.dequantize())
model.eval()
# Add `sparse_params` to the model. The test for correct
# sparse_param addition is in the sparsifier tests
model.linear.sparse_params = {"sparse_block_shape": (1, 4)}
# generate model versions
qmodel = copy.deepcopy(model)
sqmodel = copy.deepcopy(model)
# generate model versions and apply qconfigs
tq.propagate_qconfig_(qmodel, qconfig_dict)
tq.propagate_qconfig_(sqmodel, qconfig_dict)
tq.prepare(qmodel, inplace=True)
tq.prepare(sqmodel, inplace=True)
# calibrate
with torch.no_grad():
qmodel(X_fp32)
sqmodel(X_fp32)
# ACTUAL TESTING BEGINS HERE
# --------------------------
# Make sure the quantization parameters are computed the same way
qparams = qmodel.linear.qconfig.weight().calculate_qparams()
sqparams = sqmodel.linear.qconfig.weight().calculate_qparams()
test_class.assertEqual(qparams, sqparams)
sqmodule_to_check = fqn_to_module(sqmodel, fqn_to_check)
sqmodule_start_class = sqmodule_to_check.__class__
sqmodule_expected_converted_class = sparse_mapping[sqmodule_start_class]
qmodule_to_check = fqn_to_module(qmodel, fqn_to_check)
qmodule_start_class = qmodule_to_check.__class__
qmodule_expected_converted_class = ref_mapping[qmodule_start_class]
# need to determine whether dynamic quantization is being performed since
# input dtype will be different at the end
is_dynamic = isinstance(
qmodule_to_check.activation_post_process, tq.PlaceholderObserver
)
tq.convert(sqmodel, inplace=True, mapping=sparse_mapping)
tq.convert(qmodel, inplace=True, mapping=ref_mapping)
# this code is a duplicate of above since the references do not
# update to the post-convert modules
sqmodule_to_check = fqn_to_module(sqmodel, fqn_to_check)
qmodule_to_check = fqn_to_module(qmodel, fqn_to_check)
# check that the modules were converted as expected
assert isinstance(sqmodule_to_check, sqmodule_expected_converted_class), (
"Convert failed"
)
assert isinstance(qmodule_to_check, qmodule_expected_converted_class), (
"Mapping failed"
)
row_block_size, col_block_size = sqmodel.linear._packed_params._weight_bias()[
2:
]
assert row_block_size == 1 and col_block_size == 4
# only run during serialization/deserialization tests
# makes sure script/save/load doesn't malform the sqmodel
if test_scripting:
scripted_sqmodel = torch.jit.script(sqmodel)
scripted_sqmodel.eval()
buffer = io.BytesIO()
torch.jit.save(scripted_sqmodel, buffer)
buffer.seek(0)
sqmodel = torch.jit.load(buffer)
# use correct input dtype
if is_dynamic:
Y_ref = qmodel(X_fp32)
Y_hat = sqmodel(X_fp32)
test_class.assertEqual(Y_ref, Y_hat)
else:
Y_ref = qmodel(X_q)
Y_hat = sqmodel(X_q)
test_class.assertEqual(Y_ref.dequantize(), Y_hat.dequantize())
| TestQuantizedSparseKernels |
python | allegroai__clearml | clearml/backend_api/services/v2_20/workers.py | {
"start": 68065,
"end": 72089
} | class ____(Response):
"""
Response of workers.get_stats endpoint.
:param workers: List of the requested workers with their statistics
:type workers: Sequence[WorkerStats]
"""
_service = "workers"
_action = "get_stats"
_version = "2.20"
_schema = {
"definitions": {
"aggregation_stats": {
"properties": {
"aggregation": {
"oneOf": [
{"$ref": "#/definitions/aggregation_type"},
{"type": "null"},
]
},
"values": {
"description": "List of values corresponding to the dates in metric statistics",
"items": {"type": "number"},
"type": ["array", "null"],
},
},
"type": "object",
},
"aggregation_type": {
"description": "Metric aggregation type",
"enum": ["avg", "min", "max"],
"type": "string",
},
"metric_stats": {
"properties": {
"dates": {
"description": "List of timestamps (in seconds from epoch) in the acceding order. The timestamps are separated by the requested interval. Timestamps where no workers activity was recorded are omitted.",
"items": {"type": "integer"},
"type": ["array", "null"],
},
"metric": {
"description": "Name of the metric (cpu_usage, memory_used etc.)",
"type": ["string", "null"],
},
"stats": {
"description": "Statistics data by type",
"items": {"$ref": "#/definitions/aggregation_stats"},
"type": ["array", "null"],
},
"variant": {
"description": "Name of the metric component. Set only if 'split_by_variant' was set in the request",
"type": ["string", "null"],
},
},
"type": "object",
},
"worker_stats": {
"properties": {
"metrics": {
"description": "List of the metrics statistics for the worker",
"items": {"$ref": "#/definitions/metric_stats"},
"type": ["array", "null"],
},
"worker": {
"description": "ID of the worker",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"workers": {
"description": "List of the requested workers with their statistics",
"items": {"$ref": "#/definitions/worker_stats"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, workers: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetStatsResponse, self).__init__(**kwargs)
self.workers = workers
@schema_property("workers")
def workers(self) -> Optional[List[Any]]:
return self._property_workers
@workers.setter
def workers(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_workers = None
return
self.assert_isinstance(value, "workers", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [WorkerStats.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "workers", WorkerStats, is_array=True)
self._property_workers = value
| GetStatsResponse |
python | doocs__leetcode | solution/0800-0899/0848.Shifting Letters/Solution.py | {
"start": 0,
"end": 304
} | class ____:
def shiftingLetters(self, s: str, shifts: List[int]) -> str:
n, t = len(s), 0
s = list(s)
for i in range(n - 1, -1, -1):
t += shifts[i]
j = (ord(s[i]) - ord("a") + t) % 26
s[i] = ascii_lowercase[j]
return "".join(s)
| Solution |
python | scrapy__scrapy | tests/test_commands.py | {
"start": 14540,
"end": 15367
} | class ____:
def test_valid_command(self) -> None:
argv = ["scrapy", "crawl", "my_spider"]
command = _pop_command_name(argv)
assert command == "crawl"
assert argv == ["scrapy", "my_spider"]
def test_no_command(self) -> None:
argv = ["scrapy"]
command = _pop_command_name(argv)
assert command is None
assert argv == ["scrapy"]
def test_option_before_command(self) -> None:
argv = ["scrapy", "-h", "crawl"]
command = _pop_command_name(argv)
assert command == "crawl"
assert argv == ["scrapy", "-h"]
def test_option_after_command(self) -> None:
argv = ["scrapy", "crawl", "-h"]
command = _pop_command_name(argv)
assert command == "crawl"
assert argv == ["scrapy", "-h"]
| TestPopCommandName |
python | getsentry__sentry | src/sentry/models/organizationmember.py | {
"start": 2638,
"end": 3488
} | class ____(Enum):
APPROVED = 0
REQUESTED_TO_BE_INVITED = 1
REQUESTED_TO_JOIN = 2
@classmethod
def as_choices(cls):
return (
(InviteStatus.APPROVED.value, _("Approved")),
(
InviteStatus.REQUESTED_TO_BE_INVITED.value,
_("Organization member requested to invite user"),
),
(InviteStatus.REQUESTED_TO_JOIN.value, _("User requested to join organization")),
)
invite_status_names = {
InviteStatus.APPROVED.value: "approved",
InviteStatus.REQUESTED_TO_BE_INVITED.value: "requested_to_be_invited",
InviteStatus.REQUESTED_TO_JOIN.value: "requested_to_join",
}
ERR_CANNOT_INVITE = "Your organization is not allowed to invite members."
ERR_JOIN_REQUESTS_DISABLED = "Your organization does not allow requests to join."
| InviteStatus |
python | arrow-py__arrow | arrow/locales.py | {
"start": 67634,
"end": 68929
} | class ____(Locale):
names = ["hi", "hi-in"]
past = "{0} पहले"
future = "{0} बाद"
timeframes = {
"now": "अभी",
"second": "एक पल",
"seconds": "{0} सेकंड्",
"minute": "एक मिनट ",
"minutes": "{0} मिनट ",
"hour": "एक घंटा",
"hours": "{0} घंटे",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक माह ",
"months": "{0} महीने ",
"year": "एक वर्ष ",
"years": "{0} साल ",
}
meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"}
month_names = [
"",
"जनवरी",
"फरवरी",
"मार्च",
"अप्रैल ",
"मई",
"जून",
"जुलाई",
"अगस्त",
"सितंबर",
"अक्टूबर",
"नवंबर",
"दिसंबर",
]
month_abbreviations = [
"",
"जन",
"फ़र",
"मार्च",
"अप्रै",
"मई",
"जून",
"जुलाई",
"आग",
"सित",
"अकत",
"नवे",
"दिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"]
| HindiLocale |
python | django__django | tests/null_fk_ordering/models.py | {
"start": 362,
"end": 629
} | class ____(models.Model):
title = models.CharField(max_length=150)
author = models.ForeignKey(Author, models.SET_NULL, null=True)
class Meta:
ordering = ["author__name"]
# These following 4 models represent a far more complex ordering case.
| Article |
python | doocs__leetcode | solution/2500-2599/2551.Put Marbles in Bags/Solution.py | {
"start": 0,
"end": 199
} | class ____:
def putMarbles(self, weights: List[int], k: int) -> int:
arr = sorted(a + b for a, b in pairwise(weights))
return sum(arr[len(arr) - k + 1 :]) - sum(arr[: k - 1])
| Solution |
python | py-pdf__pypdf | pypdf/errors.py | {
"start": 143,
"end": 232
} | class ____(Exception):
"""Raised when a deprecated feature is used."""
| DeprecationError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.