language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-core/llama_index/core/evaluation/retrieval/evaluator.py | {
"start": 506,
"end": 1834
} | class ____(BaseRetrievalEvaluator):
"""
Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field(
default=None, description="Optional post-processor"
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids and texts, potentially applying a post-processor."""
retrieved_nodes = await self.retriever.aretrieve(query)
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
return (
[node.node.node_id for node in retrieved_nodes],
[node.text for node in retrieved_nodes],
)
| RetrieverEvaluator |
python | ansible__ansible | test/units/plugins/filter/test_mathstuff.py | {
"start": 3893,
"end": 4474
} | class ____:
def test_root_non_number(self):
with pytest.raises(AnsibleError, match="root\\(\\) can only be used on numbers: could not convert string to float: 'a'"):
ms.inversepower(10, 'a')
with pytest.raises(AnsibleError, match="root\\(\\) can only be used on numbers: must be real number, not str"):
ms.inversepower('a', 10)
def test_square_root(self):
assert ms.inversepower(100) == 10
assert ms.inversepower(100, 2) == 10
def test_cube_root(self):
assert ms.inversepower(27, 3) == 3
| TestInversePower |
python | realpython__materials | emacs-the-best-python-editor/PyEval/expr_test.py | {
"start": 115,
"end": 4153
} | class ____(unittest.TestCase):
"""
Validation of Expression and Operator classes.
No setup function is needed
"""
def test_positive_operand_expression(self):
"""
Tests a single positive operand expression
"""
expr = Expression("53")
self.assertEqual("53 ", expr.result(), "ERROR: Positive operand")
def test_negative_operand_expression(self):
"""
Tests a single negative operand expression
"""
expr = Expression("-53")
self.assertEqual("-53 ", expr.result(), "ERROR: Negative operand")
def test_double_term_expression(self):
"""
Tests a set of double term expressions
"""
expr = Expression("53+2")
self.assertEqual(
"53 2 + ", expr.result(), "ERROR: Double positive term expression"
)
expr = Expression("-53+2")
self.assertEqual(
"-53 2 + ",
expr.result(),
"ERROR: Negative/positive term expression",
)
expr = Expression("53+-2")
self.assertEqual(
"53 -2 + ",
expr.result(),
"ERROR: Positive/negative term expression",
)
expr = Expression("-53+-2")
self.assertEqual(
"-53 -2 + ",
expr.result(),
"ERROR: Double negative term expression",
)
def test_double_term_operands(self):
"""
Tests a set of operands
"""
expr = Expression("53+2")
self.assertEqual(
"53 2 + ", expr.result(), "ERROR: Additive expression"
)
expr = Expression("53-2")
self.assertEqual(
"53 2 - ", expr.result(), "ERROR: Subtrative expression"
)
expr = Expression("53*2")
self.assertEqual(
"53 2 * ", expr.result(), "ERROR: Multiplicative expression"
)
expr = Expression("53/2")
self.assertEqual("53 2 / ", expr.result(), "ERROR: Divide expression")
def test_triple_term_expression(self):
"""
Tests a set of triple term expressions
"""
expr = Expression("53+2+37")
self.assertEqual(
"53 2 37 + + ", expr.result(), "ERROR: Add/Add expression"
)
expr = Expression("53+2*37")
self.assertEqual(
"53 2 37 * + ", expr.result(), "ERROR: Add/Multiply expression"
)
expr = Expression("53*2+37")
self.assertEqual(
"53 2 * 37 + ", expr.result(), "ERROR: Multiply/Add expression"
)
expr = Expression("53*2*37")
self.assertEqual(
"53 2 37 * * ",
expr.result(),
"ERROR: Multiply/Multiply expression",
)
def test_whitespace_expression(self):
"""
Tests a set of expressions with a variety of whitespace
"""
expr = Expression("53+2+37")
self.assertEqual(
"53 2 37 + + ", expr.result(), "ERROR: No whitespace expression"
)
expr = Expression("53 + 2 + 37")
self.assertEqual(
"53 2 37 + + ",
expr.result(),
"ERROR: Infixed whitespace expression",
)
expr = Expression(" 53+2+37 ")
self.assertEqual(
"53 2 37 + + ",
expr.result(),
"ERROR: Pre/post-fixed whitespace expression",
)
expr = Expression(" 53 + 2 + 37 ")
self.assertEqual(
"53 2 37 + + ",
expr.result(),
"ERROR: Pre/post/in-fixed whitespace expression",
)
expr = Expression(" 53 + 2 + 37 ")
self.assertEqual(
"53 2 37 + + ",
expr.result(),
"ERROR: Multiple whitespace expression",
)
# This test should throw an exception - spaces in between operands
# should give an error
with self.assertRaises(SyntaxError):
expr = Expression(" 53 + - 2 + 37 ")
expr.parse()
| TestPyEval |
python | pytorch__pytorch | torch/_inductor/test_operators.py | {
"start": 450,
"end": 861
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx: object, x: Tensor) -> Tensor:
return torch.ops._inductor_test.realize(x)
@staticmethod
# types need to stay consistent with _SingleLevelFunction
def backward(ctx: Any, *grad_output: Any) -> Any:
return grad_output[0]
def realize(x: Tensor) -> Tensor:
return Realize.apply(x)
| Realize |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/dynamic_args/test_multiple_eval_dataloaders.py | {
"start": 1005,
"end": 2222
} | class ____(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return torch.ones(1)
def __len__(self):
return self.len
@pytest.mark.parametrize("seq_type", [tuple, list])
def test_multiple_eval_dataloaders_seq(tmp_path, seq_type):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx, dataloader_idx):
if dataloader_idx == 0:
assert batch.sum() == 0
elif dataloader_idx == 1:
assert batch.sum() == 11
else:
raise Exception("should only have two dataloaders")
def val_dataloader(self):
dl1 = torch.utils.data.DataLoader(RandomDatasetA(32, 64), batch_size=11)
dl2 = torch.utils.data.DataLoader(RandomDatasetB(32, 64), batch_size=11)
return seq_type((dl1, dl2))
model = TestModel()
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
enable_model_summary=False,
)
trainer.fit(model)
| RandomDatasetB |
python | pytest-dev__pytest | testing/_py/test_local.py | {
"start": 37186,
"end": 40690
} | class ____:
OPTS = {"ensuresyspath": "importlib"}
def test_pyimport(self, path1):
obj = path1.join("execfile.py").pyimport(**self.OPTS)
assert obj.x == 42
assert obj.__name__ == "execfile"
def test_pyimport_dir_fails(self, tmpdir):
p = tmpdir.join("hello_123")
p.ensure("__init__.py")
with pytest.raises(ImportError):
p.pyimport(**self.OPTS)
def test_pyimport_execfile_different_name(self, path1):
obj = path1.join("execfile.py").pyimport(modname="0x.y.z", **self.OPTS)
assert obj.x == 42
assert obj.__name__ == "0x.y.z"
def test_pyimport_relative_import_fails(self, path1):
otherdir = path1.join("otherdir")
with pytest.raises(ImportError):
otherdir.join("a.py").pyimport(**self.OPTS)
def test_pyimport_doesnt_use_sys_modules(self, tmpdir):
p = tmpdir.ensure("file738jsk.py")
mod = p.pyimport(**self.OPTS)
assert mod.__name__ == "file738jsk"
assert "file738jsk" not in sys.modules
def test_pypkgdir(tmpdir):
pkg = tmpdir.ensure("pkg1", dir=1)
pkg.ensure("__init__.py")
pkg.ensure("subdir/__init__.py")
assert pkg.pypkgpath() == pkg
assert pkg.join("subdir", "__init__.py").pypkgpath() == pkg
def test_pypkgdir_unimportable(tmpdir):
pkg = tmpdir.ensure("pkg1-1", dir=1) # unimportable
pkg.ensure("__init__.py")
subdir = pkg.ensure("subdir/__init__.py").dirpath()
assert subdir.pypkgpath() == subdir
assert subdir.ensure("xyz.py").pypkgpath() == subdir
assert not pkg.pypkgpath()
def test_isimportable():
try:
from py.path import isimportable # py vendored version
except ImportError:
from py._path.local import isimportable # py library
assert not isimportable("")
assert isimportable("x")
assert isimportable("x1")
assert isimportable("x_1")
assert isimportable("_")
assert isimportable("_1")
assert not isimportable("x-1")
assert not isimportable("x:1")
def test_homedir_from_HOME(monkeypatch):
path = os.getcwd()
monkeypatch.setenv("HOME", path)
assert local._gethomedir() == local(path)
def test_homedir_not_exists(monkeypatch):
monkeypatch.delenv("HOME", raising=False)
monkeypatch.delenv("HOMEDRIVE", raising=False)
homedir = local._gethomedir()
assert homedir is None
def test_samefile(tmpdir):
assert tmpdir.samefile(tmpdir)
p = tmpdir.ensure("hello")
assert p.samefile(p)
with p.dirpath().as_cwd():
assert p.samefile(p.basename)
if sys.platform == "win32":
p1 = p.__class__(str(p).lower())
p2 = p.__class__(str(p).upper())
assert p1.samefile(p2)
@pytest.mark.skipif(not hasattr(os, "symlink"), reason="os.symlink not available")
def test_samefile_symlink(tmpdir):
p1 = tmpdir.ensure("foo.txt")
p2 = tmpdir.join("linked.txt")
try:
os.symlink(str(p1), str(p2))
except (OSError, NotImplementedError) as e:
# on Windows this might fail if the user doesn't have special symlink permissions
# pypy3 on Windows doesn't implement os.symlink and raises NotImplementedError
pytest.skip(str(e.args[0]))
assert p1.samefile(p2)
def test_listdir_single_arg(tmpdir):
tmpdir.ensure("hello")
assert tmpdir.listdir("hello")[0].basename == "hello"
def test_mkdtemp_rootdir(tmpdir):
dtmp = local.mkdtemp(rootdir=tmpdir)
assert tmpdir.listdir() == [dtmp]
| TestImportlibImport |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py | {
"start": 2411,
"end": 2829
} | class ____:
"""YXYX contains axis indices for the YXYX format.
All values in the YXYX format should be absolute pixel values.
The YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
| YXYX |
python | pytransitions__transitions | transitions/experimental/utils.py | {
"start": 3907,
"end": 4954
} | class ____(metaclass=ABCMeta):
{model_attribute}: "StateIdentifier" = ""
def trigger(self, name: str) -> bool: {_placeholder_body}
{trigger_block}
{state_block}\
{callback_block}"""
return template
def with_model_definitions(cls):
add_model = getattr(cls, "add_model")
def add_model_override(self, model, initial=None):
self.model_override = True
for model in listify(model):
model = self if model == "self" else model
for name, specs in TriggerPlaceholder.definitions.get(model.__class__, {}).items():
for spec in specs:
if isinstance(spec, list):
self.add_transition(name, *spec)
elif isinstance(spec, dict):
self.add_transition(name, **spec)
else:
raise ValueError("Cannot add {} for event {} to machine", spec, name)
add_model(self, model, initial)
setattr(cls, 'add_model', add_model_override)
return cls
| BaseModel |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 49911,
"end": 50731
} | class ____(Operation):
def call(self, x, y):
return backend.numpy.bitwise_and(x, y)
def compute_output_spec(self, x, y):
dtype = dtypes.result_type(x.dtype, y.dtype)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.bitwise_and", "keras.ops.numpy.bitwise_and"])
def bitwise_and(x, y):
"""Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of the
integers in the input arrays. This ufunc implements the C/Python operator
`&`.
Args:
x: Input integer tensor.
y: Input integer tensor.
Returns:
Result tensor.
"""
if any_symbolic_tensors((x, y)):
return BitwiseAnd().symbolic_call(x, y)
return backend.numpy.bitwise_and(x, y)
| BitwiseAnd |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_bash_code_execution_result_block_param.py | {
"start": 361,
"end": 646
} | class ____(TypedDict, total=False):
content: Required[Iterable[BetaBashCodeExecutionOutputBlockParam]]
return_code: Required[int]
stderr: Required[str]
stdout: Required[str]
type: Required[Literal["bash_code_execution_result"]]
| BetaBashCodeExecutionResultBlockParam |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 25062,
"end": 27400
} | class ____(BiffRecord):
"""
Record FORMAT, BIFF8:
Offset Size Contents
0 2 Format index used in other records
2 var. Number format string (Unicode string, 16-bit string length)
From BIFF5 on, the built-in number formats will be omitted. The built-in
formats are dependent on the current regional settings of the operating
system. The following table shows which number formats are used by default
in a US-English environment. All indexes from 0 to 163 are reserved for
built-in formats. The first user-defined format starts at 164.
The built-in number formats, BIFF5-BIFF8
Index Type Format string
0 General General
1 Decimal 0
2 Decimal 0.00
3 Decimal #,##0
4 Decimal #,##0.00
5 Currency "$"#,##0_);("$"#,##
6 Currency "$"#,##0_);[Red]("$"#,##
7 Currency "$"#,##0.00_);("$"#,##
8 Currency "$"#,##0.00_);[Red]("$"#,##
9 Percent 0%
10 Percent 0.00%
11 Scientific 0.00E+00
12 Fraction # ?/?
13 Fraction # ??/??
14 Date M/D/YY
15 Date D-MMM-YY
16 Date D-MMM
17 Date MMM-YY
18 Time h:mm AM/PM
19 Time h:mm:ss AM/PM
20 Time h:mm
21 Time h:mm:ss
22 Date/Time M/D/YY h:mm
37 Account _(#,##0_);(#,##0)
38 Account _(#,##0_);[Red](#,##0)
39 Account _(#,##0.00_);(#,##0.00)
40 Account _(#,##0.00_);[Red](#,##0.00)
41 Currency _("$"* #,##0_);_("$"* (#,##0);_("$"* "-"_);_(@_)
42 Currency _(* #,##0_);_(* (#,##0);_(* "-"_);_(@_)
43 Currency _("$"* #,##0.00_);_("$"* (#,##0.00);_("$"* "-"??_);_(@_)
44 Currency _(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(@_)
45 Time mm:ss
46 Time [h]:mm:ss
47 Time mm:ss.0
48 Scientific ##0.0E+0
49 Text @
"""
_REC_ID = 0x041E
def __init__(self, idx, fmtstr):
ufmtstr = upack2(fmtstr)
ufmtstr_len = len(ufmtstr)
self._rec_data = pack('<H%ds' % ufmtstr_len, idx, ufmtstr)
| NumberFormatRecord |
python | django__django | tests/contenttypes_tests/models.py | {
"start": 1688,
"end": 1807
} | class ____(models.Model):
text = models.CharField(max_length=200)
answer_set = GenericRelation("Answer")
| Question |
python | cython__cython | Cython/Compiler/FlowControl.py | {
"start": 13034,
"end": 13937
} | class ____(list):
# Keeps track of Node's entry assignments
#
# cf_is_null [boolean] It is uninitialized
# cf_maybe_null [boolean] May be uninitialized
# is_single [boolean] Has only one assignment at this point
cf_maybe_null = False
cf_is_null = False
is_single = False
def __init__(self, state):
if Uninitialized in state:
state.discard(Uninitialized)
self.cf_maybe_null = True
if not state:
self.cf_is_null = True
elif Unknown in state:
state.discard(Unknown)
self.cf_maybe_null = True
else:
if len(state) == 1:
self.is_single = True
# XXX: Remove fake_rhs_expr
super().__init__(
[i for i in state if i.rhs is not fake_rhs_expr])
def one(self):
return self[0]
| ControlFlowState |
python | pytorch__pytorch | torch/utils/_sympy/value_ranges.py | {
"start": 1062,
"end": 3674
} | class ____(RuntimeError):
pass
# Like sympify, but supports less stuff, and also ensures that direct
# sympy expressions don't have free variables
def simple_sympify(e):
if isinstance(e, bool):
return sympy.true if e else sympy.false
elif isinstance(e, int):
return sympy.Integer(e)
elif isinstance(e, float):
# infinity is special; we use it to bracket integers as well
if math.isinf(e):
return sympy.oo if e > 0 else -sympy.oo
return sympy.Float(e)
elif isinstance(e, sympy.Expr):
if not getattr(e, "is_number", False):
raise AssertionError(e)
# NaNs can occur when doing things like 0 * sympy.oo, but it is better
# if the operator notices this and takes care of it, because sometimes
# the NaN is inappropriate (for example, for ints, the [-oo, oo] range
# should go to zero when multiplied with [0, 0])
if e == sympy.nan:
raise AssertionError("sympy expression is NaN")
return e
elif isinstance(e, BooleanAtom):
return e
else:
raise AssertionError(f"not simple sympy type {type(e)}: {e}")
# Sympy atomics only. Unlike <=, it also works on Sympy bools.
def sympy_generic_le(lower, upper):
if isinstance(lower, sympy.Expr):
if not isinstance(upper, sympy.Expr):
raise AssertionError(
"upper must be a sympy.Expr when lower is a sympy.Expr"
)
# instead of lower <= upper, we do upper >= lower since upper is mostly int_oo
# and we have better code paths there.
return upper >= lower
else:
# only negative condition is True > False
if not isinstance(lower, SympyBoolean) or not isinstance(upper, SympyBoolean):
raise AssertionError((lower, upper))
return not (lower and not upper)
def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]:
return vr.is_bool
def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]:
return not vr.is_bool
def is_sympy_integer(value) -> TypeIs[sympy.Integer]:
return isinstance(value, sympy.Integer)
ExprIn = Union[int, float, sympy.Expr]
BoolIn = Union[bool, SympyBoolean]
AllIn = Union[ExprIn, BoolIn]
ExprFn = Callable[[sympy.Expr], sympy.Expr]
ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr]
BoolFn = Callable[[SympyBoolean], SympyBoolean]
BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean]
AllFn = Union[ExprFn, BoolFn]
AllFn2 = Union[ExprFn2, BoolFn2]
@dataclasses.dataclass(frozen=True)
| ValueRangeError |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/workflow_events.py | {
"start": 650,
"end": 758
} | class ____(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
| AgentSetup |
python | pytorch__pytorch | torch/_functorch/pyfunctorch.py | {
"start": 5231,
"end": 6510
} | class ____(FuncTorchInterpreter):
def __init__(self, cdata: CInterpreter):
assert cdata.key() == TransformType.Grad
# See NOTE: [Interpreter cdata vs cptr]
self._cdata = cdata
@cached_property
# pyrefly: ignore [bad-override]
def _cptr(self):
return CGradInterpreterPtr(self._cdata)
def lift(self, args, kwargs):
args, kwargs = pytree.tree_map_only(
torch.Tensor, self._cptr.lift, [args, kwargs]
)
return args, kwargs
def process(self, op, args, kwargs):
kernel = op.functorch_table[TransformType.Grad]
args, kwargs = self.lift(args, kwargs)
return kernel(self, *args, **kwargs)
# GradInterpreter has custom lower because of the no_grad interaction
# See NOTE [grad and vjp interaction with no_grad]
# This logic is mirrored from C++ GradInterpreterPtr::sendToNextInterpreter
def lower(self):
prev_grad_mode = self.prev_grad_mode()
if not prev_grad_mode:
return nested(torch.no_grad(), super().lower())
return super().lower()
def prev_grad_mode(self):
return self._cptr.prevGradMode()
def get_state(self):
return (self.key().name, self.level(), self.prev_grad_mode())
| GradInterpreter |
python | huggingface__transformers | src/transformers/models/siglip2/modeling_siglip2.py | {
"start": 27662,
"end": 29202
} | class ____(nn.Module):
"""Multihead Attention Pooling."""
def __init__(self, config: Siglip2VisionConfig):
super().__init__()
self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = Siglip2MLP(config)
self.num_heads = config.num_attention_heads
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size = hidden_state.shape[0]
probe = self.probe.repeat(batch_size, 1, 1)
if attention_mask is not None:
target_len, source_len = probe.shape[1], hidden_state.shape[1]
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_state.dtype, target_len)
attention_mask = attention_mask.repeat(1, self.num_heads, target_len, 1)
attention_mask = attention_mask.reshape(-1, target_len, source_len)
hidden_state = self.attention(probe, hidden_state, hidden_state, attn_mask=attention_mask)[0]
residual = hidden_state
hidden_state = self.layernorm(hidden_state)
hidden_state = residual + self.mlp(hidden_state)
return hidden_state[:, 0]
@auto_docstring(
custom_intro="""
The vision model from Siglip2 without any head or projection on top.
"""
)
| Siglip2MultiheadAttentionPoolingHead |
python | pytorch__pytorch | test/test_utils.py | {
"start": 35912,
"end": 36301
} | class ____(TestCase):
def test_deprecated(self):
with self.assertWarnsRegex(Warning, "is DEPRECATED"):
deprecated_api(1, 2) # noqa: F821
with self.assertWarnsRegex(Warning, "is DEPRECATED"):
deprecated_api(1, y=2) # noqa: F821
_deprecated_api(1, 2)
_deprecated_api(1, y=2)
if __name__ == "__main__":
run_tests()
| TestDeprecate |
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 25186,
"end": 35672
} | class ____:
"""An abstract base class that provides color, line styles, etc."""
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0, 1 not True, False for extension code
self._capstyle = CapStyle('butt')
self._cliprect = None
self._clippath = None
self._dashes = 0, None
self._joinstyle = JoinStyle('round')
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._hatch_color = None
self._hatch_linewidth = rcParams['hatch.linewidth']
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
"""Copy properties from *gc* to self."""
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._hatch_color = gc._hatch_color
self._hatch_linewidth = gc._hatch_linewidth
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack.
"""
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends.
"""
return self._alpha
def get_antialiased(self):
"""Return whether the object should try to do antialiased rendering."""
return self._antialiased
def get_capstyle(self):
"""Return the `.CapStyle`."""
return self._capstyle.name
def get_clip_rectangle(self):
"""
Return the clip rectangle as a `~matplotlib.transforms.Bbox` instance.
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a `~.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
tpath, tr = self._clippath.get_transformed_path_and_affine()
if np.all(np.isfinite(tpath.vertices)):
return tpath, tr
else:
_log.warning("Ill-defined clip_path detected. Returning None.")
return None, None
return None, None
def get_dashes(self):
"""
Return the dash style as an (offset, dash-list) pair.
See `.set_dashes` for details.
Default value is (None, None).
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""Return the `.JoinStyle`."""
return self._joinstyle.name
def get_linewidth(self):
"""Return the line width in points."""
return self._linewidth
def get_rgb(self):
"""Return a tuple of three or four floats from 0-1."""
return self._rgb
def get_url(self):
"""Return a url if one is set, None otherwise."""
return self._url
def get_gid(self):
"""Return the object identifier if one is set, None otherwise."""
return self._gid
def get_snap(self):
"""
Return the snap setting, which can be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line segments,
round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._rgb, isRGBA=True)
def set_antialiased(self, b):
"""Set whether object should be drawn with antialiased rendering."""
# Use ints to make life easier on extension code trying to read the gc.
self._antialiased = int(bool(b))
@_docstring.interpd
def set_capstyle(self, cs):
"""
Set how to draw endpoints of lines.
Parameters
----------
cs : `.CapStyle` or %(CapStyle)s
"""
self._capstyle = CapStyle(cs)
def set_clip_rectangle(self, rectangle):
"""Set the clip rectangle to a `.Bbox` or None."""
self._cliprect = rectangle
def set_clip_path(self, path):
"""Set the clip path to a `.TransformedPath` or None."""
_api.check_isinstance((transforms.TransformedPath, None), path=path)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
Parameters
----------
dash_offset : float
Distance, in points, into the dash pattern at which to
start the pattern. It is usually set to 0.
dash_list : array-like or None
The on-off sequence as points. None specifies a solid line. All
values must otherwise be non-negative (:math:`\\ge 0`).
Notes
-----
See p. 666 of the PostScript
`Language Reference
<https://www.adobe.com/jp/print/postscript/pdfs/PLRM.pdf>`_
for more info.
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl < 0.0):
raise ValueError(
"All values in the dash list must be non-negative")
if dl.size and not np.any(dl > 0.0):
raise ValueError(
'At least one value in the dash list must be positive')
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color.
Parameters
----------
fg : :mpltype:`color`
isRGBA : bool
If *fg* is known to be an ``(r, g, b, a)`` tuple, *isRGBA* can be
set to True to improve performance.
"""
if self._forced_alpha and isRGBA:
self._rgb = fg[:3] + (self._alpha,)
elif self._forced_alpha:
self._rgb = colors.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.to_rgba(fg)
@_docstring.interpd
def set_joinstyle(self, js):
"""
Set how to draw connections between line segments.
Parameters
----------
js : `.JoinStyle` or %(JoinStyle)s
"""
self._joinstyle = JoinStyle(js)
def set_linewidth(self, w):
"""Set the linewidth in points."""
self._linewidth = float(w)
def set_url(self, url):
"""Set the url for links in compatible backends."""
self._url = url
def set_gid(self, id):
"""Set the id."""
self._gid = id
def set_snap(self, snap):
"""
Set the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line segments,
round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""Set the hatch style (for fills)."""
self._hatch = hatch
def get_hatch(self):
"""Get the current hatch style."""
return self._hatch
def get_hatch_path(self, density=6.0):
"""Return a `.Path` for the current hatch."""
hatch = self.get_hatch()
if hatch is None:
return None
return Path.hatch(hatch, density)
def get_hatch_color(self):
"""Get the hatch color."""
return self._hatch_color
def set_hatch_color(self, hatch_color):
"""Set the hatch color."""
self._hatch_color = hatch_color
def get_hatch_linewidth(self):
"""Get the hatch linewidth."""
return self._hatch_linewidth
def set_hatch_linewidth(self, hatch_linewidth):
"""Set the hatch linewidth."""
self._hatch_linewidth = hatch_linewidth
def get_sketch_params(self):
"""
Return the sketch parameters for the artist.
Returns
-------
tuple or `None`
A 3-tuple with the following elements:
* ``scale``: The amplitude of the wiggle perpendicular to the
source line.
* ``length``: The length of the wiggle along the line.
* ``randomness``: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Set the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source line, in
pixels. If scale is `None`, or not provided, no sketch filter will
be provided.
length : float, default: 128
The length of the wiggle along the line, in pixels.
randomness : float, default: 16
The scale factor by which the length is shrunken or expanded.
"""
self._sketch = (
None if scale is None
else (scale, length or 128., randomness or 16.))
| GraphicsContextBase |
python | ray-project__ray | python/ray/experimental/collective/conftest.py | {
"start": 280,
"end": 2124
} | class ____(Communicator):
"""
A dummy NCCL group for testing.
"""
def __init__(self, actor_handles: List[ray.actor.ActorHandle]):
self._actor_handles = actor_handles
self._rank = None
def initialize(self, rank: int) -> None:
self._rank = rank
def get_rank(self, actor: ray.actor.ActorHandle) -> int:
return self._actor_handles.index(actor)
def get_world_size(self) -> int:
return len(self._actor_handles)
def get_self_rank(self) -> Optional[int]:
return self._rank
def get_actor_handles(self) -> List["ray.actor.ActorHandle"]:
return self._actor_handles
def send(self, value: "torch.Tensor", peer_rank: int) -> None:
raise NotImplementedError
def recv(
self,
shape: Tuple[int],
dtype: "torch.dtype",
peer_rank: int,
allocator: Optional[TorchTensorAllocator] = None,
) -> "torch.Tensor":
raise NotImplementedError
def allgather(
self,
send_buf: "torch.Tensor",
recv_buf: "torch.Tensor",
) -> None:
raise NotImplementedError
def allreduce(
self,
send_buf: "torch.Tensor",
recv_buf: "torch.Tensor",
op: ReduceOp = ReduceOp.SUM,
) -> None:
raise NotImplementedError
def reducescatter(
self,
send_buf: "torch.Tensor",
recv_buf: "torch.Tensor",
op: ReduceOp = ReduceOp.SUM,
) -> None:
raise NotImplementedError
@property
def recv_stream(self):
return None
@property
def send_stream(self):
return None
def destroy(self) -> None:
pass
def get_transport_name(self) -> str:
return "accelerator"
@classmethod
def generate_communicator_id(cls) -> str:
pass
| AbstractNcclGroup |
python | run-llama__llama_index | llama-index-experimental/llama_index/experimental/nudge/base.py | {
"start": 659,
"end": 6180
} | class ____:
"""
The algorithm implemented here and the current state of the art is called [NUDGE](https://www.arxiv.org/abs/2409.02343).
If a validation dataset is provided, the best model is evaluated and saved based on the validation loss at the end of every epoch.
Args:
train_dataset (EmbeddingQAFinetuneDataset): Dataset to finetune on.
embed_model (BaseEmbedding): Embedding model.
val_dataset (EmbeddingQAFinetuneDataset): Validation dataset.
use_nudge_n (bool): Whether to use NUDGE-N or NUDGE-M. Defaults to True.
device (Optional[str]): Device to use. Defaults to None.
"""
def __init__(
self,
embed_model: BaseEmbedding,
train_dataset: EmbeddingQAFinetuneDataset,
val_dataset: EmbeddingQAFinetuneDataset,
use_nudge_n: bool = True,
device: Optional[str] = None,
) -> None:
"""Init params."""
try:
from nudge import NUDGEN, NUDGEM
except ImportError:
raise ImportError(NUDGE_IMPORT_ERROR_MSG)
try:
import torch
except ImportError:
raise ImportError(PYTORCH_IMPORT_ERROR_MSG)
if device is None:
device = infer_torch_device()
logger.info(f"Use pytorch device: {device}")
self._target_device = torch.device(device)
self.embed_model = embed_model
self.corpus = train_dataset.corpus
self.corpus_embeddings = self._get_corpus_embeddings(self.corpus)
self.train_dataset = self._format_dataset(train_dataset, self.corpus)
self.val_dataset = self._format_dataset(val_dataset, self.corpus)
self.nudge = (
NUDGEN(device=self._target_device)
if use_nudge_n
else NUDGEM(device=self._target_device)
)
def _format_dataset(
self, dataset: EmbeddingQAFinetuneDataset, corpus: Dict[str, str]
):
"""
Convert the dataset into NUDGE format.
Args:
dataset (EmbeddingQAFinetuneDataset): Dataset to convert.
"""
try:
import numpy as np
except ImportError:
raise ImportError(NUMPY_IMPORT_ERROR_MSG)
q_embs = []
q_ans_indx = []
corpus_keys = list(corpus.keys())
for query_id, query in dataset.queries.items():
query_embedding = self.embed_model.get_query_embedding(query)
q_embs.append(query_embedding)
relevant_docs = dataset.relevant_docs[query_id]
relevant_doc_indices = [corpus_keys.index(doc) for doc in relevant_docs]
q_ans_indx.append(relevant_doc_indices)
return {"q_embs": np.array(q_embs), "q_ans_indx": q_ans_indx}
def _get_corpus_embeddings(self, corpus: Dict[str, str]):
"""Get corpus embeddings."""
try:
import numpy as np
except ImportError:
raise ImportError(NUMPY_IMPORT_ERROR_MSG)
text_embeddings = [
self.embed_model.get_text_embedding(text) for text in corpus.values()
]
return np.array(text_embeddings)
def finetune(self):
self.corpus_embeddings = self.nudge.finetune_embeddings(
embeddings=self.corpus_embeddings,
train_set=self.train_dataset,
val_set=self.val_dataset,
nontrain_embeddings=None,
val_batch_size=256,
gamma=None,
)
def insert_data_and_finetune(
self,
new_train_dataset_batch: EmbeddingQAFinetuneDataset,
new_val_dataset_batch: Optional[EmbeddingQAFinetuneDataset] = None,
):
"""
Insert data and finetune. This should only be done if the new data you are inserting does not conflict with the already existing data. It's important to not finetune multiple times as this can cause the embeddings to lose semantic meaning since they will become further from the original embeddings.
"""
try:
import numpy as np
except ImportError:
raise ImportError(NUMPY_IMPORT_ERROR_MSG)
new_corpus_batch = new_train_dataset_batch.corpus
# if any of the new ids are already in the existing corpus, raise an error
if any(id in self.corpus for id in new_corpus_batch):
raise ValueError(
f"ID {id} already exists in the existing corpus. New IDs must be unique."
)
# get the embeddings for the new corpus
new_corpus_initial_embeddings_batch = self._get_corpus_embeddings(
new_corpus_batch
)
existing_corpus_embeddings = self.corpus_embeddings
new_train_dataset = self._format_dataset(
new_train_dataset_batch, new_corpus_batch
)
new_val_dataset = self._format_dataset(new_val_dataset_batch, new_corpus_batch)
new_corpus_embeddings_batch = self.nudge.finetune_embeddings(
embeddings=new_corpus_initial_embeddings_batch,
train_set=new_train_dataset,
val_set=new_val_dataset,
# runs faster by filtering the embeddings which will not have any queries
nontrain_embeddings=existing_corpus_embeddings,
val_batch_size=256,
gamma=None,
)
self.corpus_embeddings = np.concatenate(
[existing_corpus_embeddings, new_corpus_embeddings_batch]
)
def get_finetuned_corpus_embeddings(self):
return self.corpus_embeddings
| Nudge |
python | tensorflow__tensorflow | tensorflow/python/types/core.py | {
"start": 4654,
"end": 5545
} | class ____(Callable):
"""Base class for graph functions.
An `AtomicFunction` encapsulates a single graph function definition.
`AtomicFunction` can be called directly only if no captures are needed
according to the `FunctionType`. If captures are present, please use
`call_with_captures` instead.
`AtomicFunction` does not support gradients. Please use the parent
`ConcreteFunction` if you need gradient support.
"""
def call_with_captures(self, args, kwargs, captures):
"""Calls this AtomicFunction with captures as defined by its FunctionType.
Args:
args: Tuple containing positional arguments
kwargs: Dict containing keyword arguments
captures: Tuple of tensors supplying captured tensor values.
Returns:
A structured output value based on the inputs.
"""
@tf_export("types.experimental.ConcreteFunction", v1=[])
| AtomicFunction |
python | celery__celery | t/unit/tasks/test_trace.py | {
"start": 19969,
"end": 21585
} | class ____(TraceCase):
class TI(TraceInfo):
__slots__ = TraceInfo.__slots__ + ('__dict__',)
def test_handle_error_state(self):
x = self.TI(states.FAILURE)
x.handle_failure = Mock()
x.handle_error_state(self.add_cast, self.add_cast.request)
x.handle_failure.assert_called_with(
self.add_cast,
self.add_cast.request,
store_errors=self.add_cast.store_errors_even_if_ignored,
call_errbacks=True,
)
def test_handle_error_state_for_eager_task(self):
x = self.TI(states.FAILURE)
x.handle_failure = Mock()
x.handle_error_state(self.add, self.add.request, eager=True)
x.handle_failure.assert_called_once_with(
self.add,
self.add.request,
store_errors=False,
call_errbacks=True,
)
def test_handle_error_for_eager_saved_to_backend(self):
x = self.TI(states.FAILURE)
x.handle_failure = Mock()
self.add.store_eager_result = True
x.handle_error_state(self.add, self.add.request, eager=True)
x.handle_failure.assert_called_with(
self.add,
self.add.request,
store_errors=True,
call_errbacks=True,
)
@patch('celery.app.trace.ExceptionInfo')
def test_handle_reject(self, ExceptionInfo):
x = self.TI(states.FAILURE)
x._log_error = Mock(name='log_error')
req = Mock(name='req')
x.handle_reject(self.add, req)
x._log_error.assert_called_with(self.add, req, ExceptionInfo())
| test_TraceInfo |
python | coleifer__peewee | tests/models.py | {
"start": 1257,
"end": 1445
} | class ____(TestModel):
content = TextField(column_name='Content')
timestamp = DateTimeField(column_name='TimeStamp',
default=datetime.datetime.now)
| Post |
python | xlwings__xlwings | tests/reports/test_report.py | {
"start": 6409,
"end": 11970
} | class ____(unittest.TestCase):
def tearDown(self):
xw.Book(this_dir / "output.xlsx").app.quit()
def test_one_frame(self):
df = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], columns=["c1", "c2"], index=["r1", "r2"]
)
wb = render_template(
this_dir / "template_one_frame.xlsx",
this_dir / "output.xlsx",
df=df.reset_index(),
title="MyTitle",
)
for i in range(2):
sheet = wb.sheets[i]
self.assertEqual(sheet["A1"].value, "MyTitle")
self.assertEqual(sheet["A3"].value, "PART ONE")
self.assertEqual(sheet["A8"].value, "PART TWO")
if i == 0:
assert_frame_equal(
sheet["A4"].options(pd.DataFrame, expand="table").value,
df.reset_index().set_index("index"),
)
assert_frame_equal(
sheet["A9"].options(pd.DataFrame, expand="table").value,
df.reset_index().set_index("index"),
)
elif i == 1:
df_table1 = sheet["A4"].options(pd.DataFrame, expand="table").value
df_table1.index.name = None
df_table2 = sheet["A9"].options(pd.DataFrame, expand="table").value
df_table2.index.name = None
assert_frame_equal(df_table1, df)
assert_frame_equal(df_table2, df)
self.assertEqual(sheet["A3"].color, (0, 176, 240))
self.assertEqual(sheet["A8"].color, (0, 176, 240))
def test_two_frames(self):
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
columns=["c1", "c2", "c3"],
index=["r1", "r2", "r3"],
)
df1.index.name = "df1"
df3 = pd.DataFrame(
[
[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0],
[10.0, 11.0, 12.0],
[13.0, 14.0, 15.0],
],
columns=["c1", "c2", "c3"],
index=["r1", "r2", "r3", "r4", "r5"],
)
df3.index.name = "df3"
text = "abcd"
pic = Image(this_dir / "xlwings.jpg")
data = dict(
df1=df1.reset_index(),
df2="df2 dummy",
df3=df3.reset_index(),
text=text,
pic=pic,
)
wb = render_template(
this_dir / "template_two_frames.xlsx", this_dir / "output.xlsx", **data
)
sheet = wb.sheets[0]
# values
assert_frame_equal(sheet["A1"].options(pd.DataFrame, expand="table").value, df3)
self.assertEqual(sheet["A8"].value, "df2 dummy")
self.assertEqual(sheet["C10"].value, "abcd")
assert_frame_equal(
sheet["A12"].options(pd.DataFrame, expand="table").value, df1
)
assert_frame_equal(
sheet["A17"].options(pd.DataFrame, expand="table").value, df3
)
assert_frame_equal(
sheet["A24"].options(pd.DataFrame, expand="table").value, df3
)
assert_frame_equal(
sheet["A31"].options(pd.DataFrame, expand="table").value, df3
)
assert_frame_equal(sheet["F1"].options(pd.DataFrame, expand="table").value, df1)
self.assertEqual(sheet["G6"].value, "abcd")
assert_frame_equal(sheet["F8"].options(pd.DataFrame, expand="table").value, df3)
assert_frame_equal(
sheet["F15"].options(pd.DataFrame, expand="table").value, df1
)
assert_frame_equal(
sheet["F27"].options(pd.DataFrame, expand="table").value, df1
)
self.assertEqual(sheet["F32"].value, "df2 dummy")
assert_frame_equal(
sheet["F34"].options(pd.DataFrame, expand="table").value, df3
)
# colors
self.assertEqual(sheet["A2:D6"].color, (221, 235, 247))
self.assertEqual(sheet["A13:D15"].color, (221, 235, 247))
self.assertEqual(sheet["A18:D22"].color, (221, 235, 247))
self.assertEqual(sheet["A25:D29"].color, (221, 235, 247))
self.assertEqual(sheet["A32:D36"].color, (221, 235, 247))
self.assertEqual(sheet["F2:I4"].color, (221, 235, 247))
self.assertEqual(sheet["F9:I13"].color, (221, 235, 247))
self.assertEqual(sheet["F16:I18"].color, (221, 235, 247))
self.assertEqual(sheet["F28:I30"].color, (221, 235, 247))
self.assertEqual(sheet["F35:I39"].color, (221, 235, 247))
# borders
# TODO: pending Border implementation in xlwings
if sys.platform.startswith("darwin"):
from appscript import k as kw
for cell in [
"A4",
"A14",
"D20",
"A28",
"D36",
"F4",
"H10",
"G17",
"G28",
"I36",
]:
self.assertEqual(
sheet[cell]
.api.get_border(which_border=kw.edge_top)
.properties()
.get(kw.line_style),
kw.continuous,
)
self.assertEqual(
sheet[cell]
.api.get_border(which_border=kw.edge_bottom)
.properties()
.get(kw.line_style),
kw.continuous,
)
else:
pass
# TODO
| TestFrames |
python | huggingface__transformers | src/transformers/models/dinov2/modeling_dinov2.py | {
"start": 1435,
"end": 5113
} | class ____(nn.Module):
"""
Construct the CLS token, mask token, position and patch embeddings.
"""
def __init__(self, config: Dinov2Config) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
if config.use_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.patch_embeddings = Dinov2PatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.use_mask_token = config.use_mask_token
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing and interpolation at torch.float32 precision.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
target_dtype = patch_pos_embed.dtype
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.to(torch.float32),
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
).to(dtype=target_dtype)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None and self.use_mask_token:
embeddings = torch.where(
bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
)
# add the [CLS] token to the embedded patch tokens
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = self.dropout(embeddings)
return embeddings
| Dinov2Embeddings |
python | apache__airflow | providers/common/messaging/src/airflow/providers/common/messaging/triggers/msg_queue.py | {
"start": 1520,
"end": 6112
} | class ____(BaseEventTrigger):
"""
``MessageQueueTrigger`` serves as a unified trigger for monitoring message queues from different providers.
It abstracts away provider-specific details, allowing users to monitor a queue with a single trigger,
regardless of the underlying provider.
This makes it easy to switch providers without modifying the trigger.
:param scheme: The queue scheme (e.g., 'kafka', 'redis+pubsub', 'sqs'). Used for provider matching.
:param queue: **Deprecated** The queue identifier (URI format). If provided, this takes precedence over scheme parameter.
This parameter is deprecated and will be removed in future versions. Use the 'scheme' parameter instead.
.. seealso::
For more information on how to use this trigger, take a look at the guide:
:ref:`howto/trigger:MessageQueueTrigger`
"""
queue: str | None = None
scheme: str | None = None
def __init__(self, *, queue: str | None = None, scheme: str | None = None, **kwargs: Any) -> None:
if queue is None and scheme is None:
raise ValueError("Either `queue` or `scheme` parameter must be provided.")
# For backward compatibility, queue takes precedence
if queue is not None:
warnings.warn(
"The `queue` parameter is deprecated and will be removed in future versions. "
"Use the `scheme` parameter instead and pass configuration as keyword arguments to `MessageQueueTrigger`.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.queue = queue
self.scheme = None
else:
self.queue = None
self.scheme = scheme
self.kwargs = kwargs
@cached_property
def trigger(self) -> BaseEventTrigger:
if len(MESSAGE_QUEUE_PROVIDERS) == 0:
self.log.error(
"No message queue providers are available. "
"Please ensure that you have the necessary providers installed."
)
raise ValueError("No message queue providers are available. ")
# Find matching providers based on queue URI or scheme
if self.queue is not None:
# Use existing queue-based matching for backward compatibility
providers = [
provider for provider in MESSAGE_QUEUE_PROVIDERS if provider.queue_matches(self.queue)
]
identifier = self.queue
match_by = "queue"
elif self.scheme is not None:
# Use new scheme-based matching
providers = [
provider for provider in MESSAGE_QUEUE_PROVIDERS if provider.scheme_matches(self.scheme)
]
identifier = self.scheme
match_by = "scheme"
if len(providers) == 0:
self.log.error(
"The %s '%s' is not recognized by any of the registered providers. "
"The available providers are: '%s'.",
match_by,
identifier,
", ".join([type(provider).__name__ for provider in MESSAGE_QUEUE_PROVIDERS]),
)
raise ValueError(
f"The {match_by} '{identifier}' is not recognized by any of the registered providers."
)
if len(providers) > 1:
self.log.error(
"The %s '%s' is recognized by more than one provider. "
"At least two providers in ``MESSAGE_QUEUE_PROVIDERS`` are colliding with each "
"other: '%s'",
match_by,
identifier,
", ".join([type(provider).__name__ for provider in providers]),
)
raise ValueError(f"The {match_by} '{identifier}' is recognized by more than one provider.")
# Create trigger instance
selected_provider = providers[0]
if self.queue is not None:
# Pass queue to trigger_kwargs for backward compatibility
trigger_kwargs = selected_provider.trigger_kwargs(self.queue, **self.kwargs)
return selected_provider.trigger_class()(**trigger_kwargs, **self.kwargs)
# For scheme-based matching, we need to pass all current kwargs to the trigger
return selected_provider.trigger_class()(**self.kwargs)
def serialize(self) -> tuple[str, dict[str, Any]]:
return self.trigger.serialize()
async def run(self) -> AsyncIterator[TriggerEvent]:
async for event in self.trigger.run():
yield event
| MessageQueueTrigger |
python | viewflow__viewflow | viewflow/views/update.py | {
"start": 1012,
"end": 5452
} | class ____(
FormLayoutMixin, FormDependentSelectMixin, FormAjaxCompleteMixin, generic.UpdateView
):
viewset = None
layout = None
form_widgets = None
page_actions = None
def has_change_permission(self, request, obj=None):
if self.viewset is not None and hasattr(self.viewset, "has_change_permission"):
return self.viewset.has_change_permission(request.user, obj=obj)
else:
return has_object_perm(request.user, "change", self.model, obj=obj)
def get_object_url(self, obj):
if self.viewset is not None and hasattr(self.viewset, "get_object_url"):
return self.viewset.get_object_url(self.request, obj)
elif hasattr(obj, "get_absolute_url"):
if self.has_change_permission(self.request, obj):
return obj.get_absolute_url()
def get_page_actions(self, *actions):
if self.viewset and hasattr(self.viewset, "get_update_page_actions"):
actions = (
self.viewset.get_update_page_actions(self.request, self.object)
+ actions
)
if self.page_actions:
actions = self.page_actions + actions
return actions
def message_user(self):
url = self.get_object_url(self.object)
link = ""
if url:
link = format_html('<a href="{}">{}</a>', urlquote(url), _("View"))
message = format_html(
_("The {obj} was changed successfully. {link}"),
obj=str(self.object),
link=link,
)
messages.add_message(
self.request, messages.SUCCESS, message, fail_silently=True
)
@viewprop
def queryset(self):
if self.viewset is not None and hasattr(self.viewset, "get_queryset"):
return self.viewset.get_queryset(self.request)
return None
def get_form_widgets(self):
if self.form_widgets is not None:
return self.form_widgets
elif self.viewset and hasattr(self.viewset, "get_update_form_widgets"):
return self.viewset.get_update_form_widgets(self.request)
elif self.viewset and hasattr(self.viewset, "get_form_widgets"):
return self.viewset.get_form_widgets(self.request)
return None
def get_form_class(self):
if self.form_class is not None:
return self.form_class
elif self.viewset and hasattr(self.viewset, "get_update_form_class"):
return self.viewset.get_update_form_class(self.request)
elif self.viewset and hasattr(self.viewset, "get_form_class"):
return self.viewset.get_form_class(self.request)
else:
return modelform_factory(
self.model,
form=ModelForm,
fields=self.fields,
widgets=self.get_form_widgets(),
)
def get_object(self):
pk = self.kwargs.get(self.pk_url_kwarg)
if pk is not None:
pk = unquote(pk)
try:
self.kwargs[self.pk_url_kwarg] = self.model._meta.pk.to_python(pk)
except (ValidationError, ValueError):
raise Http404
obj = super().get_object()
if not self.has_change_permission(self.request, obj):
raise PermissionDenied
return obj
def get_template_names(self):
"""
List of templates for the view.
If no `self.template_name` defined, uses::
[<app_label>/<model_label>_<suffix>.html,
<app_label>/<model_label>_form.html,
'viewflow/views/form.html']
"""
if self.template_name is None:
opts = self.model._meta
return [
"{}/{}{}.html".format(
opts.app_label, opts.model_name, self.template_name_suffix
),
"{}/{}_form.html".format(opts.app_label, opts.model_name),
"viewflow/views/form.html",
]
return [self.template_name]
def form_valid(self, *args, **kwargs):
response = super(UpdateModelView, self).form_valid(*args, **kwargs)
self.message_user()
return response
def get_success_url(self):
if self.viewset and hasattr(self.viewset, "get_success_url"):
return self.viewset.get_success_url(self.request, obj=self.object)
return "../"
| UpdateModelView |
python | dagster-io__dagster | python_modules/libraries/dagster-duckdb-pandas/dagster_duckdb_pandas/duckdb_pandas_type_handler.py | {
"start": 6023,
"end": 9068
} | class ____(DuckDBIOManager):
"""An I/O manager definition that reads inputs from and writes Pandas DataFrames to DuckDB. When
using the DuckDBPandasIOManager, any inputs and outputs without type annotations will be loaded
as Pandas DataFrames.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_duckdb_pandas import DuckDBPandasIOManager
@asset(
key_prefix=["my_schema"] # will be used as the schema in DuckDB
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={"io_manager": DuckDBPandasIOManager(database="my_db.duckdb")}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the DuckDB I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
Definitions(
assets=[my_table],
resources={"io_manager": DuckDBPandasIOManager(database="my_db.duckdb", schema="my_schema")}
)
On individual assets, you an also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in duckdb
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in duckdb
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
"""
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [DuckDBPandasTypeHandler()]
@staticmethod
def default_load_type() -> Optional[type]:
return pd.DataFrame
| DuckDBPandasIOManager |
python | apache__airflow | helm-tests/tests/helm_tests/other/test_flower.py | {
"start": 25966,
"end": 27507
} | class ____:
"""Tests flower service account."""
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"flower": {
"enabled": True,
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/flower/flower-serviceaccount.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_default_automount_service_account_token(self):
docs = render_chart(
values={
"flower": {
"enabled": True,
"serviceAccount": {
"create": True,
},
}
},
show_only=["templates/flower/flower-serviceaccount.yaml"],
)
assert jmespath.search("automountServiceAccountToken", docs[0]) is True
def test_overridden_automount_service_account_token(self):
docs = render_chart(
values={
"flower": {
"enabled": True,
"serviceAccount": {"create": True, "automountServiceAccountToken": False},
}
},
show_only=["templates/flower/flower-serviceaccount.yaml"],
)
assert jmespath.search("automountServiceAccountToken", docs[0]) is False
| TestFlowerServiceAccount |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/result.py | {
"start": 61496,
"end": 66118
} | class ____(FilterResult[_R], util.TypingOnly):
"""A :class:`_engine.Result` that's typed as returning plain
Python tuples instead of rows.
Since :class:`_engine.Row` acts like a tuple in every way already,
this class is a typing only class, regular :class:`_engine.Result` is
still used at runtime.
"""
__slots__ = ()
if TYPE_CHECKING:
def partitions(
self, size: Optional[int] = None
) -> Iterator[Sequence[_R]]:
"""Iterate through sub-lists of elements of the size given.
Equivalent to :meth:`_engine.Result.partitions` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
def fetchone(self) -> Optional[_R]:
"""Fetch one tuple.
Equivalent to :meth:`_engine.Result.fetchone` except that
tuple values, rather than :class:`_engine.Row`
objects, are returned.
"""
...
def fetchall(self) -> Sequence[_R]:
"""A synonym for the :meth:`_engine.ScalarResult.all` method."""
...
def fetchmany(self, size: Optional[int] = None) -> Sequence[_R]:
"""Fetch many objects.
Equivalent to :meth:`_engine.Result.fetchmany` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
def all(self) -> Sequence[_R]: # noqa: A001
"""Return all scalar values in a sequence.
Equivalent to :meth:`_engine.Result.all` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
def __iter__(self) -> Iterator[_R]: ...
def __next__(self) -> _R: ...
def first(self) -> Optional[_R]:
"""Fetch the first object or ``None`` if no object is present.
Equivalent to :meth:`_engine.Result.first` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
def one_or_none(self) -> Optional[_R]:
"""Return at most one object or raise an exception.
Equivalent to :meth:`_engine.Result.one_or_none` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
def one(self) -> _R:
"""Return exactly one object or raise an exception.
Equivalent to :meth:`_engine.Result.one` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
@overload
def scalar_one(self: TupleResult[Tuple[_T]]) -> _T: ...
@overload
def scalar_one(self) -> Any: ...
def scalar_one(self) -> Any:
"""Return exactly one scalar result or raise an exception.
This is equivalent to calling :meth:`_engine.Result.scalars`
and then :meth:`_engine.ScalarResult.one`.
.. seealso::
:meth:`_engine.ScalarResult.one`
:meth:`_engine.Result.scalars`
"""
...
@overload
def scalar_one_or_none(
self: TupleResult[Tuple[_T]],
) -> Optional[_T]: ...
@overload
def scalar_one_or_none(self) -> Optional[Any]: ...
def scalar_one_or_none(self) -> Optional[Any]:
"""Return exactly one or no scalar result.
This is equivalent to calling :meth:`_engine.Result.scalars`
and then :meth:`_engine.ScalarResult.one_or_none`.
.. seealso::
:meth:`_engine.ScalarResult.one_or_none`
:meth:`_engine.Result.scalars`
"""
...
@overload
def scalar(self: TupleResult[Tuple[_T]]) -> Optional[_T]: ...
@overload
def scalar(self) -> Any: ...
def scalar(self) -> Any:
"""Fetch the first column of the first row, and close the result
set.
Returns ``None`` if there are no rows to fetch.
No validation is performed to test if additional rows remain.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.CursorResult.close`
method will have been called.
:return: a Python scalar value , or ``None`` if no rows remain.
"""
...
| TupleResult |
python | eth-brownie__brownie | brownie/typing.py | {
"start": 2538,
"end": 2891
} | class ____(_ContractBuildJson):
type: Literal["contract"]
language: Literal["Vyper"]
ContractBuildJson = SolidityBuildJson | VyperBuildJson
BuildJson = ContractBuildJson | InterfaceBuildJson
# Compiler
Language = Literal["Solidity", "Vyper"]
EvmVersion = NewType("EvmVersion", str)
Source = Tuple[Start, Stop, ContractName, str]
| VyperBuildJson |
python | google__jax | jax/_src/interpreters/partial_eval.py | {
"start": 54012,
"end": 54067
} | class ____: pass
Recompute = RecomputeType()
| RecomputeType |
python | wandb__wandb | landfill/functional_tests/artifacts/use-and-link-model.py | {
"start": 187,
"end": 1474
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def main():
run = wandb.init()
my_model = Net()
sm = _SavedModel.init(my_model)
art = wandb.Artifact("my-model", "model")
art.add(sm, "index")
art = run.log_artifact(art)
art.wait()
# use_model() hits the download path where we try to download the file
# using entry._file_url, which fails in this test harness
# TODO: Remove the download() call once caching is implemented in nexus
art.download()
sm = use_model("my-model:latest")
link_model(sm, "project/test_portfolio")
run.finish()
if __name__ == "__main__":
main()
| Net |
python | pyinstaller__pyinstaller | bootloader/waflib/TaskGen.py | {
"start": 292,
"end": 12755
} | class ____(object):
mappings = Utils.ordered_iter_dict()
prec = Utils.defaultdict(set)
def __init__(self, *k, **kw):
self.source = []
self.target = ''
self.meths = []
self.features = []
self.tasks = []
if not 'bld' in kw:
self.env = ConfigSet.ConfigSet()
self.idx = 0
self.path = None
else:
self.bld = kw['bld']
self.env = self.bld.env.derive()
self.path = kw.get('path', self.bld.path)
path = self.path.abspath()
try:
self.idx = self.bld.idx[path] = self.bld.idx.get(path, 0) + 1
except AttributeError:
self.bld.idx = {}
self.idx = self.bld.idx[path] = 1
try:
self.tg_idx_count = self.bld.tg_idx_count = self.bld.tg_idx_count + 1
except AttributeError:
self.tg_idx_count = self.bld.tg_idx_count = 1
for key, val in kw.items():
setattr(self, key, val)
def __str__(self):
return "<task_gen %r declared in %s>" % (self.name, self.path.abspath())
def __repr__(self):
lst = []
for x in self.__dict__:
if x not in ('env', 'bld', 'compiled_tasks', 'tasks'):
lst.append("%s=%s" % (x, repr(getattr(self, x))))
return "bld(%s) in %s" % (", ".join(lst), self.path.abspath())
def get_cwd(self):
return self.bld.bldnode
def get_name(self):
try:
return self._name
except AttributeError:
if isinstance(self.target, list):
lst = [str(x) for x in self.target]
name = self._name = ','.join(lst)
else:
name = self._name = str(self.target)
return name
def set_name(self, name):
self._name = name
name = property(get_name, set_name)
def to_list(self, val):
if isinstance(val, str):
return val.split()
else:
return val
def post(self):
if getattr(self, 'posted', None):
return False
self.posted = True
keys = set(self.meths)
keys.update(feats['*'])
self.features = Utils.to_list(self.features)
for x in self.features:
st = feats[x]
if st:
keys.update(st)
elif not x in Task.classes:
Logs.warn('feature %r does not exist - bind at least one method to it?', x)
prec = {}
prec_tbl = self.prec
for x in prec_tbl:
if x in keys:
prec[x] = prec_tbl[x]
tmp = []
for a in keys:
for x in prec.values():
if a in x:
break
else:
tmp.append(a)
tmp.sort(reverse=True)
out = []
while tmp:
e = tmp.pop()
if e in keys:
out.append(e)
try:
nlst = prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
tmp.sort(reverse=True)
if prec:
buf = ['Cycle detected in the method execution:']
for k, v in prec.items():
buf.append('- %s after %s' % (k, [x for x in v if x in prec]))
raise Errors.WafError('\n'.join(buf))
self.meths = out
Logs.debug('task_gen: posting %s %d', self, id(self))
for x in out:
try:
v = getattr(self, x)
except AttributeError:
raise Errors.WafError('%r is not a valid task generator method' % x)
Logs.debug('task_gen: -> %s (%d)', x, id(self))
v()
Logs.debug('task_gen: posted %s', self.name)
return True
def get_hook(self, node):
name = node.name
for k in self.mappings:
try:
if name.endswith(k):
return self.mappings[k]
except TypeError:
if k.match(name):
return self.mappings[k]
keys = list(self.mappings.keys())
raise Errors.WafError("File %r has no mapping in %r (load a waf tool?)" % (node, keys))
def create_task(self, name, src=None, tgt=None, **kw):
task = Task.classes[name](env=self.env.derive(), generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
task.__dict__.update(kw)
self.tasks.append(task)
return task
def clone(self, env):
newobj = self.bld()
for x in self.__dict__:
if x in ('env', 'bld'):
continue
elif x in ('path', 'features'):
setattr(newobj, x, getattr(self, x))
else:
setattr(newobj, x, copy.copy(getattr(self, x)))
newobj.posted = False
if isinstance(env, str):
newobj.env = self.bld.all_envs[env].derive()
else:
newobj.env = env.derive()
return newobj
def declare_chain(
name='',
rule=None,
reentrant=None,
color='BLUE',
ext_in=[],
ext_out=[],
before=[],
after=[],
decider=None,
scan=None,
install_path=None,
shell=False
):
ext_in = Utils.to_list(ext_in)
ext_out = Utils.to_list(ext_out)
if not name:
name = rule
cls = Task.task_factory(
name, rule, color=color, ext_in=ext_in, ext_out=ext_out, before=before, after=after, scan=scan, shell=shell
)
def x_file(self, node):
if ext_in:
_ext_in = ext_in[0]
tsk = self.create_task(name, node)
cnt = 0
ext = decider(self, node) if decider else cls.ext_out
for x in ext:
k = node.change_ext(x, ext_in=_ext_in)
tsk.outputs.append(k)
if reentrant != None:
if cnt < int(reentrant):
self.source.append(k)
else:
for y in self.mappings:
if k.name.endswith(y):
self.source.append(k)
break
cnt += 1
if install_path:
self.install_task = self.add_install_files(install_to=install_path, install_from=tsk.outputs)
return tsk
for x in cls.ext_in:
task_gen.mappings[x] = x_file
return x_file
def taskgen_method(func):
setattr(task_gen, func.__name__, func)
return func
def feature(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for name in k:
feats[name].update([func.__name__])
return func
return deco
def before_method(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for fun_name in k:
task_gen.prec[func.__name__].add(fun_name)
return func
return deco
before = before_method
def after_method(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for fun_name in k:
task_gen.prec[fun_name].add(func.__name__)
return func
return deco
after = after_method
def extension(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for x in k:
task_gen.mappings[x] = func
return func
return deco
@taskgen_method
def to_nodes(self, lst, path=None):
tmp = []
path = path or self.path
find = path.find_resource
if isinstance(lst, Node.Node):
lst = [lst]
for x in Utils.to_list(lst):
if isinstance(x, str):
node = find(x)
elif hasattr(x, 'name'):
node = x
else:
tmp.extend(self.to_nodes(x))
continue
if not node:
raise Errors.WafError('source not found: %r in %r' % (x, self))
tmp.append(node)
return tmp
@feature('*')
def process_source(self):
self.source = self.to_nodes(getattr(self, 'source', []))
for node in self.source:
self.get_hook(node)(self, node)
@feature('*')
@before_method('process_source')
def process_rule(self):
if not getattr(self, 'rule', None):
return
name = str(getattr(self, 'name', None) or self.target or getattr(self.rule, '__name__', self.rule))
try:
cache = self.bld.cache_rule_attr
except AttributeError:
cache = self.bld.cache_rule_attr = {}
chmod = getattr(self, 'chmod', None)
shell = getattr(self, 'shell', True)
color = getattr(self, 'color', 'BLUE')
scan = getattr(self, 'scan', None)
_vars = getattr(self, 'vars', [])
cls_str = getattr(self, 'cls_str', None)
cls_keyword = getattr(self, 'cls_keyword', None)
use_cache = getattr(self, 'cache_rule', 'True')
deep_inputs = getattr(self, 'deep_inputs', False)
scan_val = has_deps = hasattr(self, 'deps')
if scan:
scan_val = id(scan)
key = Utils.h_list((name, self.rule, chmod, shell, color, cls_str, cls_keyword, scan_val, _vars, deep_inputs))
cls = None
if use_cache:
try:
cls = cache[key]
except KeyError:
pass
if not cls:
rule = self.rule
if chmod is not None:
def chmod_fun(tsk):
for x in tsk.outputs:
os.chmod(x.abspath(), tsk.generator.chmod)
if isinstance(rule, tuple):
rule = list(rule)
rule.append(chmod_fun)
rule = tuple(rule)
else:
rule = (rule, chmod_fun)
cls = Task.task_factory(name, rule, _vars, shell=shell, color=color)
if cls_str:
setattr(cls, '__str__', self.cls_str)
if cls_keyword:
setattr(cls, 'keyword', self.cls_keyword)
if deep_inputs:
Task.deep_inputs(cls)
if scan:
cls.scan = self.scan
elif has_deps:
def scan(self):
nodes = []
for x in self.generator.to_list(getattr(self.generator, 'deps', None)):
node = self.generator.path.find_resource(x)
if not node:
self.generator.bld.fatal('Could not find %r (was it declared?)' % x)
nodes.append(node)
return [nodes, []]
cls.scan = scan
if use_cache:
cache[key] = cls
tsk = self.create_task(name)
for x in ('after', 'before', 'ext_in', 'ext_out'):
setattr(tsk, x, getattr(self, x, []))
if hasattr(self, 'stdout'):
tsk.stdout = self.stdout
if hasattr(self, 'stderr'):
tsk.stderr = self.stderr
if getattr(self, 'timeout', None):
tsk.timeout = self.timeout
if getattr(self, 'always', None):
tsk.always_run = True
if getattr(self, 'target', None):
if isinstance(self.target, str):
self.target = self.target.split()
if not isinstance(self.target, list):
self.target = [self.target]
for x in self.target:
if isinstance(x, str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir()
tsk.outputs.append(x)
if getattr(self, 'install_path', None):
self.install_task = self.add_install_files(
install_to=self.install_path, install_from=tsk.outputs, chmod=getattr(self, 'chmod', Utils.O644)
)
if getattr(self, 'source', None):
tsk.inputs = self.to_nodes(self.source)
self.source = []
if getattr(self, 'cwd', None):
tsk.cwd = self.cwd
if isinstance(tsk.run, functools.partial):
tsk.run = functools.partial(tsk.run, tsk)
@feature('seq')
def sequence_order(self):
if self.meths and self.meths[-1] != 'sequence_order':
self.meths.append('sequence_order')
return
if getattr(self, 'seq_start', None):
return
if getattr(self.bld, 'prev', None):
self.bld.prev.post()
for x in self.bld.prev.tasks:
for y in self.tasks:
y.set_run_after(x)
self.bld.prev = self
re_m4 = re.compile(r'@(\w+)@', re.M)
| task_gen |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 14506,
"end": 15309
} | class ____(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
| SimpleElement |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/type_api.py | {
"start": 3387,
"end": 3525
} | class ____(Protocol[_T]):
def __call__(
self, expr: ColumnElement[_T]
) -> TypeEngine.Comparator[_T]: ...
| _ComparatorFactory |
python | matplotlib__matplotlib | lib/matplotlib/sphinxext/plot_directive.py | {
"start": 17031,
"end": 18169
} | class ____:
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, f"{self.basename}.{format}")
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived, includes=None):
"""
Return whether *derived* is out-of-date relative to *original* or any of
the RST files included in it using the RST include directive (*includes*).
*derived* and *original* are full paths, and *includes* is optionally a
list of full paths which may have been included in the *original*.
"""
if not os.path.exists(derived):
return True
if includes is None:
includes = []
files_to_check = [original, *includes]
def out_of_date_one(original, derived_mtime):
return (os.path.exists(original) and
derived_mtime < os.stat(original).st_mtime)
derived_mtime = os.stat(derived).st_mtime
return any(out_of_date_one(f, derived_mtime) for f in files_to_check)
| ImageFile |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 9231,
"end": 10583
} | class ____(AtomicRule):
"""integrate(poly(x)/sqrt(a+b*x+c*x**2), x)"""
a: Expr
b: Expr
c: Expr
coeffs: list[Expr]
def eval(self) -> Expr:
a, b, c, coeffs, x = self.a, self.b, self.c, self.coeffs.copy(), self.variable
# Integrate poly/sqrt(a+b*x+c*x**2) using recursion.
# coeffs are coefficients of the polynomial.
# Let I_n = x**n/sqrt(a+b*x+c*x**2), then
# I_n = A * x**(n-1)*sqrt(a+b*x+c*x**2) - B * I_{n-1} - C * I_{n-2}
# where A = 1/(n*c), B = (2*n-1)*b/(2*n*c), C = (n-1)*a/(n*c)
# See https://github.com/sympy/sympy/pull/23608 for proof.
result_coeffs = []
coeffs = coeffs.copy()
for i in range(len(coeffs)-2):
n = len(coeffs)-1-i
coeff = coeffs[i]/(c*n)
result_coeffs.append(coeff)
coeffs[i+1] -= (2*n-1)*b/2*coeff
coeffs[i+2] -= (n-1)*a*coeff
d, e = coeffs[-1], coeffs[-2]
s = sqrt(a+b*x+c*x**2)
constant = d-b*e/(2*c)
if constant == 0:
I0 = 0
else:
step = inverse_trig_rule(IntegralInfo(1/s, x), degenerate=False)
I0 = constant*step.eval()
return Add(*(result_coeffs[i]*x**(len(coeffs)-2-i)
for i in range(len(result_coeffs))), e/c)*s + I0
@dataclass
| SqrtQuadraticDenomRule |
python | tensorflow__tensorflow | tensorflow/python/keras/losses.py | {
"start": 14743,
"end": 16871
} | class ____(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * abs(y_true - y_pred) / y_true`
Standalone usage:
>>> y_true = [[2., 1.], [2., 3.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> mape = tf.keras.losses.MeanAbsolutePercentageError()
>>> mape(y_true, y_pred).numpy()
50.
>>> # Calling with 'sample_weight'.
>>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
20.
>>> # Using 'sum' reduction type.
>>> mape = tf.keras.losses.MeanAbsolutePercentageError(
... reduction=tf.keras.losses.Reduction.SUM)
>>> mape(y_true, y_pred).numpy()
100.
>>> # Using 'none' reduction type.
>>> mape = tf.keras.losses.MeanAbsolutePercentageError(
... reduction=tf.keras.losses.Reduction.NONE)
>>> mape(y_true, y_pred).numpy()
array([25., 75.], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=tf.keras.losses.MeanAbsolutePercentageError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_percentage_error'):
"""Initializes `MeanAbsolutePercentageError` instance.
Args:
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance. Defaults to
'mean_absolute_percentage_error'.
"""
super().__init__(
mean_absolute_percentage_error, name=name, reduction=reduction)
| MeanAbsolutePercentageError |
python | django__django | django/contrib/admindocs/views.py | {
"start": 15866,
"end": 19572
} | class ____(BaseAdminDocsView):
template_name = "admin_doc/template_detail.html"
def get_context_data(self, **kwargs):
template = self.kwargs["template"]
templates = []
try:
default_engine = Engine.get_default()
except ImproperlyConfigured:
# Non-trivial TEMPLATES settings aren't supported (#24125).
pass
else:
directories = list(default_engine.dirs)
for loader in default_engine.template_loaders:
if hasattr(loader, "get_dirs"):
for dir_ in loader.get_dirs():
if dir_ not in directories:
directories.append(dir_)
for index, directory in enumerate(directories):
template_file = Path(safe_join(directory, template))
if template_file.exists():
template_contents = template_file.read_text()
else:
template_contents = ""
templates.append(
{
"file": template_file,
"exists": template_file.exists(),
"contents": template_contents,
"order": index,
}
)
return super().get_context_data(
**{
**kwargs,
"name": template,
"templates": templates,
}
)
####################
# Helper functions #
####################
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith("get_"):
if func_name.endswith("_list"):
return "List"
elif func_name.endswith("_count"):
return "Integer"
return ""
def get_readable_field_data_type(field):
"""
Return the description for a given field type, if it exists. Fields'
descriptions can contain format strings, which will be interpolated with
the values of field.__dict__ before being output.
"""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base="", namespace=None):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a 4-tuple:
(view_func, regex, namespace, name)
"""
views = []
for p in urlpatterns:
if hasattr(p, "url_patterns"):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(
extract_views_from_urlpatterns(
patterns,
base + str(p.pattern),
(namespace or []) + (p.namespace and [p.namespace] or []),
)
)
elif hasattr(p, "callback"):
try:
views.append((p.callback, base + str(p.pattern), namespace, p.name))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
def simplify_regex(pattern):
r"""
Clean up urlpattern regexes into something more readable by humans. For
example, turn "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "/<sport_slug>/athletes/<athlete_slug>/".
"""
pattern = remove_non_capturing_groups(pattern)
pattern = replace_named_groups(pattern)
pattern = replace_unnamed_groups(pattern)
pattern = replace_metacharacters(pattern)
if not pattern.startswith("/"):
pattern = "/" + pattern
return pattern
| TemplateDetailView |
python | django__django | django/contrib/messages/storage/fallback.py | {
"start": 195,
"end": 2093
} | class ____(BaseStorage):
"""
Try to store all messages in the first backend. Store any unstored
messages in each subsequent backend.
"""
storage_classes = (CookieStorage, SessionStorage)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.storages = [
storage_class(*args, **kwargs) for storage_class in self.storage_classes
]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Get a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Store the messages and return any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response, remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
| FallbackStorage |
python | openai__openai-python | src/openai/resources/containers/files/content.py | {
"start": 3033,
"end": 5492
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncContentWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncContentWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncContentWithStreamingResponse(self)
async def retrieve(
self,
file_id: str,
*,
container_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Retrieve Container File Content
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/containers/{container_id}/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
| AsyncContent |
python | pytest-dev__pytest | testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_module_class.py | {
"start": 127,
"end": 279
} | class ____:
@pytest.fixture
def spam(self, spam):
return spam * 2
def test_spam(self, spam):
assert spam == "spamspam"
| TestSpam |
python | django__django | tests/gis_tests/geoapp/test_regress.py | {
"start": 288,
"end": 3961
} | class ____(TestCase):
fixtures = ["initial"]
def test_update(self):
"Testing QuerySet.update() (#10411)."
pueblo = City.objects.get(name="Pueblo")
bak = pueblo.point.clone()
pueblo.point.y += 0.005
pueblo.point.x += 0.005
City.objects.filter(name="Pueblo").update(point=pueblo.point)
pueblo.refresh_from_db()
self.assertAlmostEqual(bak.y + 0.005, pueblo.point.y, 6)
self.assertAlmostEqual(bak.x + 0.005, pueblo.point.x, 6)
City.objects.filter(name="Pueblo").update(point=bak)
pueblo.refresh_from_db()
self.assertAlmostEqual(bak.y, pueblo.point.y, 6)
self.assertAlmostEqual(bak.x, pueblo.point.x, 6)
def test_kmz(self):
"Testing `render_to_kmz` with non-ASCII data. See #11624."
name = "Åland Islands"
places = [
{
"name": name,
"description": name,
"kml": "<Point><coordinates>5.0,23.0</coordinates></Point>",
}
]
render_to_kmz("gis/kml/placemarks.kml", {"places": places})
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent(self):
"Testing `extent` on a table with a single point. See #11827."
pnt = City.objects.get(name="Pueblo").point
ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
extent = City.objects.filter(name="Pueblo").aggregate(Extent("point"))[
"point__extent"
]
for ref_val, val in zip(ref_ext, extent):
self.assertAlmostEqual(ref_val, val, 4)
def test_unicode_date(self):
"Testing dates are converted properly, even on SpatiaLite. See #16408."
founded = datetime(1857, 5, 23)
PennsylvaniaCity.objects.create(
name="Mansfield",
county="Tioga",
point="POINT(-77.071445 41.823881)",
founded=founded,
)
self.assertEqual(
founded, PennsylvaniaCity.objects.datetimes("founded", "day")[0]
)
self.assertEqual(
founded, PennsylvaniaCity.objects.aggregate(Min("founded"))["founded__min"]
)
def test_empty_count(self):
"""
Testing that PostGISAdapter.__eq__ does check empty strings. See
#13670.
"""
# contrived example, but need a geo lookup paired with an id__in lookup
pueblo = City.objects.get(name="Pueblo")
state = State.objects.filter(poly__contains=pueblo.point)
cities_within_state = City.objects.filter(id__in=state)
# .count() should not throw TypeError in __eq__
self.assertEqual(cities_within_state.count(), 1)
@skipUnlessDBFeature("allows_group_by_lob")
def test_defer_or_only_with_annotate(self):
"""
Regression for #16409. Make sure defer() and only() work with
annotate()
"""
self.assertIsInstance(
list(City.objects.annotate(Count("point")).defer("name")), list
)
self.assertIsInstance(
list(City.objects.annotate(Count("point")).only("name")), list
)
def test_boolean_conversion(self):
"""
Testing Boolean value conversion with the spatial backend, see #15169.
"""
t1 = Truth.objects.create(val=True)
t2 = Truth.objects.create(val=False)
val1 = Truth.objects.get(pk=t1.pk).val
val2 = Truth.objects.get(pk=t2.pk).val
# verify types -- shouldn't be 0/1
self.assertIsInstance(val1, bool)
self.assertIsInstance(val2, bool)
# verify values
self.assertIs(val1, True)
self.assertIs(val2, False)
| GeoRegressionTests |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/kvstore/simple_kvstore.py | {
"start": 248,
"end": 1812
} | class ____(MutableMappingKVStore[dict]):
"""
Simple in-memory Key-Value store.
Args:
data (Optional[DATA_TYPE]): data to initialize the store with
"""
def __init__(
self,
data: Optional[DATA_TYPE] = None,
) -> None:
"""Init a SimpleKVStore."""
super().__init__(mapping_factory=dict)
if data is not None:
self._collections_mappings = data.copy()
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the store."""
fs = fs or fsspec.filesystem("file")
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
f.write(json.dumps(self._collections_mappings))
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleKVStore":
"""Load a SimpleKVStore from a persist path and filesystem."""
fs = fs or fsspec.filesystem("file")
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data = json.load(f)
return cls(data)
def to_dict(self) -> dict:
"""Save the store as dict."""
return self._collections_mappings.copy()
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleKVStore":
"""Load a SimpleKVStore from dict."""
return cls(save_dict)
| SimpleKVStore |
python | chroma-core__chroma | chromadb/telemetry/opentelemetry/grpc.py | {
"start": 698,
"end": 3731
} | class ____(
grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor,
):
def _intercept_call(self, continuation, client_call_details, request_or_iterator):
from chromadb.telemetry.opentelemetry import tracer
if tracer is None:
return continuation(client_call_details, request_or_iterator)
with tracer.start_as_current_span(
f"RPC {client_call_details.method}", kind=SpanKind.CLIENT
) as span:
# Prepare metadata for propagation
metadata = (
client_call_details.metadata[:] if client_call_details.metadata else []
)
metadata.extend(
[
(
"chroma-traceid",
_encode_trace_id(span.get_span_context().trace_id),
),
("chroma-spanid", _encode_span_id(span.get_span_context().span_id)),
]
)
# Update client call details with new metadata
new_client_details = _ClientCallDetails(
client_call_details.method,
client_call_details.timeout,
tuple(metadata), # Ensure metadata is a tuple
client_call_details.credentials,
)
try:
result = continuation(new_client_details, request_or_iterator)
# Set attributes based on the result
if hasattr(result, "details") and result.details():
span.set_attribute("rpc.detail", result.details())
span.set_attribute("rpc.status_code", result.code().name.lower())
span.set_attribute("rpc.status_code_value", result.code().value[0])
# Set span status based on gRPC call result
if result.code() != grpc.StatusCode.OK:
span.set_status(StatusCode.ERROR, description=str(result.code()))
return result
except Exception as e:
# Log exception details and re-raise
span.set_attribute("rpc.error", str(e))
span.set_status(StatusCode.ERROR, description=str(e))
raise
def intercept_unary_unary(self, continuation, client_call_details, request):
return self._intercept_call(continuation, client_call_details, request)
def intercept_unary_stream(self, continuation, client_call_details, request):
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
return self._intercept_call(continuation, client_call_details, request_iterator)
def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
return self._intercept_call(continuation, client_call_details, request_iterator)
| OtelInterceptor |
python | numba__numba | numba/tests/test_ir.py | {
"start": 17395,
"end": 20239
} | class ____(TestCase):
def test_var_in_scope_assumption(self):
# Create a pass that clears ir.Scope in ir.Block
@register_pass(mutates_CFG=False, analysis_only=False)
class RemoveVarInScope(FunctionPass):
_name = "_remove_var_in_scope"
def __init__(self):
FunctionPass.__init__(self)
# implement method to do the work, "state" is the internal compiler
# state from the CompilerBase instance.
def run_pass(self, state):
func_ir = state.func_ir
# walk the blocks
for blk in func_ir.blocks.values():
oldscope = blk.scope
# put in an empty Scope
blk.scope = ir.Scope(parent=oldscope.parent,
loc=oldscope.loc)
return True
# Create a pass that always fails, to stop the compiler
@register_pass(mutates_CFG=False, analysis_only=False)
class FailPass(FunctionPass):
_name = "_fail"
def __init__(self, *args, **kwargs):
FunctionPass.__init__(self)
def run_pass(self, state):
# This is unreachable. SSA pass should have raised before this
# pass when run with `error.NumbaPedanticWarning`s raised as
# errors.
raise AssertionError("unreachable")
class MyCompiler(CompilerBase):
def define_pipelines(self):
pm = PassManager("testing pm")
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(IRProcessing, "processing IR")
pm.add_pass(RemoveVarInScope, "_remove_var_in_scope")
pm.add_pass(ReconstructSSA, "ssa")
pm.add_pass(FailPass, "_fail")
pm.finalize()
return [pm]
@njit(pipeline_class=MyCompiler)
def dummy(x):
# To trigger SSA and the pedantic check, this function must have
# multiple assignments to the same variable in different blocks.
a = 1
b = 2
if a < b:
a = 2
else:
b = 3
return a, b
with warnings.catch_warnings():
# Make NumbaPedanticWarning an error
warnings.simplefilter("error", errors.NumbaPedanticWarning)
# Catch NumbaIRAssumptionWarning
with self.assertRaises(errors.NumbaIRAssumptionWarning) as raises:
dummy(1)
# Verify the error message
self.assertRegex(
str(raises.exception),
r"variable '[a-z]' is not in scope",
)
if __name__ == '__main__':
unittest.main()
| TestIRPedanticChecks |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_semiprime.py | {
"start": 1822,
"end": 4107
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid semiprime codes."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_semiprime": [
"4",
"6",
"95",
"1679",
"297159",
],
"malformed_semiprime": [
"1",
"5",
"100",
"297160",
"This is not a valid semiprime number",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_semiprime"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_semiprime"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_semiprime"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
"requirements": ["primefac"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidSemiprime().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidSemiprime |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 1144,
"end": 3639
} | class ____(ASTBaseBase):
def __init__(self, name: str) -> None:
if not isinstance(name, str) or len(name) == 0:
raise AssertionError
self.name = sys.intern(name)
self.is_anonymous = name[0] == '@'
# ASTBaseBase already implements this method,
# but specialising it here improves performance
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTIdentifier):
return NotImplemented
return self.name == other.name
def __hash__(self) -> int:
return hash((self.name, self.is_anonymous))
def is_anon(self) -> bool:
return self.is_anonymous
# and this is where we finally make a difference between __str__ and the display string
def __str__(self) -> str:
return self.name
def get_display_string(self) -> str:
return '[anonymous]' if self.is_anonymous else self.name
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.get_display_string())
def describe_signature(
self,
signode: TextElement,
mode: str,
env: BuildEnvironment,
prefix: str,
symbol: Symbol,
) -> None:
# note: slightly different signature of describe_signature due to the prefix
verify_description_mode(mode)
if self.is_anonymous:
node = addnodes.desc_sig_name(text='[anonymous]')
else:
node = addnodes.desc_sig_name(self.name, self.name)
if mode == 'markType':
target_text = prefix + self.name
pnode = addnodes.pending_xref(
'',
refdomain='c',
reftype='identifier',
reftarget=target_text,
modname=None,
classname=None,
)
pnode['c:parent_key'] = symbol.get_lookup_key()
pnode += node
signode += pnode
elif mode == 'lastIsName':
name_node = addnodes.desc_name()
name_node += node
signode += name_node
elif mode == 'noneIsName':
signode += node
else:
raise Exception('Unknown description mode: %s' % mode)
@property
def identifier(self) -> str:
warnings.warn(
'`ASTIdentifier.identifier` is deprecated, use `ASTIdentifier.name` instead',
DeprecationWarning,
stacklevel=2,
)
return self.name
| ASTIdentifier |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 97082,
"end": 97295
} | class ____(AccessControlRequestForUser, AccessControlRequestForGroup):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
| AccessControlRequest |
python | ansible__ansible | lib/ansible/parsing/vault/__init__.py | {
"start": 17076,
"end": 21529
} | class ____(ScriptVaultSecret):
VAULT_ID_UNKNOWN_RC = 2
def __init__(self, filename=None, encoding=None, loader=None, vault_id=None):
super(ClientScriptVaultSecret, self).__init__(filename=filename,
encoding=encoding,
loader=loader)
self._vault_id = vault_id
display.vvvv(u'Executing vault password client script: %s --vault-id %s' % (to_text(filename), to_text(vault_id)))
def _run(self, command):
try:
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password client script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode == self.VAULT_ID_UNKNOWN_RC:
raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' %
(self.filename, self._vault_id, stderr))
if popen.returncode != 0:
raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" %
(self.filename, popen.returncode, self._vault_id, stderr))
def _build_command(self):
command = [self.filename]
if self._vault_id:
command.extend(['--vault-id', self._vault_id])
return command
def __repr__(self):
if self.filename:
return "%s(filename='%s', vault_id='%s')" % \
(self.__class__.__name__, self.filename, self._vault_id)
return "%s()" % (self.__class__.__name__)
def match_secrets(secrets, target_vault_ids):
"""Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets"""
if not secrets:
return []
matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids]
return matches
def match_best_secret(secrets, target_vault_ids):
"""Find the best secret from secrets that matches target_vault_ids
Since secrets should be ordered so the early secrets are 'better' than later ones, this
just finds all the matches, then returns the first secret"""
matches = match_secrets(secrets, target_vault_ids)
if matches:
return matches[0]
# raise exception?
return None
def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None):
# See if the --encrypt-vault-id matches a vault-id
display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
if encrypt_vault_id is None:
raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id')
encrypt_vault_id_matchers = [encrypt_vault_id]
encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers)
# return the best match for --encrypt-vault-id
if encrypt_secret:
return encrypt_secret
# If we specified a encrypt_vault_id and we couldn't find it, dont
# fallback to using the first/best secret
raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id,
[_v for _v, _vs in secrets]))
def match_encrypt_secret(secrets, encrypt_vault_id=None):
"""Find the best/first/only secret in secrets to use for encrypting"""
display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
# See if the --encrypt-vault-id matches a vault-id
if encrypt_vault_id:
return match_encrypt_vault_id_secret(secrets,
encrypt_vault_id=encrypt_vault_id)
# Find the best/first secret from secrets since we didn't specify otherwise
# ie, consider all the available secrets as matches
_vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
best_secret = match_best_secret(secrets, _vault_id_matchers)
# can be empty list sans any tuple
return best_secret
| ClientScriptVaultSecret |
python | pytorch__pytorch | test/distributed/_shard/sharding_plan/test_sharding_plan.py | {
"start": 1696,
"end": 5399
} | class ____(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharding_plan_errors(self):
rowwise_sharding_spec = generate_chunk_sharding_specs_for_test(1)[0]
sharding_plan_wrong_plan = ShardingPlan(
plan={
"fc1.weight": torch.randn(3, 4),
},
output_plan={"": rowwise_sharding_spec},
)
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]]).cuda(self.rank)
with self.assertRaisesRegex(
TypeError, "Only `ShardingSpec` and `Sharder` are supported to shard"
):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_plan)
sharding_plan_wrong_output_plan = ShardingPlan(
plan={
"fc1.weight": rowwise_sharding_spec,
},
output_plan={"": torch.randn(3, 4)},
)
with self.assertRaisesRegex(
TypeError, "Only `ShardingSpec` is supported as output_plan"
):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_output_plan)
sharding_plan_wrong_module_path = ShardingPlan(
plan={
"fc3.weight": rowwise_sharding_spec,
},
)
with self.assertRaisesRegex(AttributeError, "has no attribute"):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_module_path)
sharding_plan_wrong_param_path = ShardingPlan(
plan={
"fc1.biass": rowwise_sharding_spec,
},
)
with self.assertRaisesRegex(AttributeError, "has no attribute"):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_param_path)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_custom_sharding_planner(self):
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]], rank=self.rank).cuda(
self.rank
)
planner = ChunkAllShardingPlanner(device_count=TEST_GPU_NUM)
sharding_plan = planner.build_plan(megatron_lm)
shard_module(megatron_lm, sharding_plan)
# check to make sure the module already been sharded
self.assertTrue(isinstance(megatron_lm.fc1.weight, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc2.weight, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc1.bias, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc2.bias, ShardedTensor))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_shard_module_sub_process_group(self):
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]], rank=self.rank)
colwise_sharding_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
rowwise_sharding_spec = ChunkShardingSpec(
dim=1,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
sharding_plan = ShardingPlan(
plan={
"fc1.weight": colwise_sharding_spec,
"fc2.weight": rowwise_sharding_spec,
}
)
pg = dist.new_group([2, 3])
if self.rank >= 2:
shard_module(megatron_lm, sharding_plan, process_group=pg)
if __name__ == "__main__":
run_tests()
| TestShardingPlan |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 108860,
"end": 118388
} | class ____(TestCase):
def test_all(self):
iterable = ['0', '1', '2', '3', '4', '5']
indexes = [*range(-4, 10), None]
steps = [1, 2, 3, 4, -1, -2, -3, -4]
for slice_args in product(indexes, indexes, steps):
with self.subTest(slice_args=slice_args):
actual = list(mi.islice_extended(iterable, *slice_args))
expected = iterable[slice(*slice_args)]
self.assertEqual(actual, expected, slice_args)
def test_zero_step(self):
with self.assertRaises(ValueError):
list(mi.islice_extended([1, 2, 3], 0, 1, 0))
def test_slicing(self):
iterable = map(str, count())
first_slice = mi.islice_extended(iterable)[10:]
second_slice = mi.islice_extended(first_slice)[:10]
third_slice = mi.islice_extended(second_slice)[::2]
self.assertEqual(list(third_slice), ['10', '12', '14', '16', '18'])
def test_slicing_extensive(self):
iterable = range(10)
options = (None, 1, 2, 7, -1)
for start, stop, step in product(options, options, options):
with self.subTest(slice_args=(start, stop, step)):
sliced_tuple_0 = tuple(
mi.islice_extended(iterable)[start:stop:step]
)
sliced_tuple_1 = tuple(
mi.islice_extended(iterable, start, stop, step)
)
sliced_range = tuple(iterable[start:stop:step])
self.assertEqual(sliced_tuple_0, sliced_range)
self.assertEqual(sliced_tuple_1, sliced_range)
def test_invalid_slice(self):
with self.assertRaises(TypeError):
mi.islice_extended(count())[13]
def test_elements_lifecycle(self):
# CPython does reference counting.
# GC is not required when ref counting is supported.
refCountSupported = platform.python_implementation() == 'CPython'
class TestCase(NamedTuple):
initialSize: int
slice: int
# list of expected intermediate elements states (alive or not)
# during a complete iteration
expectedAliveStates: list[list[int]]
# fmt: off
testCases = [
# testcases for: start>0, stop>0, step>0
TestCase(initialSize=3, slice=(None, None, 1), expectedAliveStates=[ # noqa: E501
[1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0], [0, 0, 0]]),
TestCase(initialSize=3, slice=(0, None, 1), expectedAliveStates=[
[1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0], [0, 0, 0]]),
TestCase(initialSize=3, slice=(1, 2, 1), expectedAliveStates=[
[1, 1, 1], [0, 0, 1], [0, 0, 1]]),
TestCase(initialSize=4, slice=(0, None, 2), expectedAliveStates=[
[1, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]),
TestCase(initialSize=5, slice=(1, 4, 2), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1]]), # noqa: E501
TestCase(initialSize=5, slice=(4, 1, 1), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 0, 0, 0, 1]]),
# FYI: to process a negative start/stop index, we need to iterate
# on the whole iterator. All the elements will be consumed
# and will ALWAYS be released on full iteration completion.
# testcases for: start<0, stop>0, step>0
TestCase(initialSize=3, slice=(-3, None, 1), expectedAliveStates=[
[1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0], [0, 0, 0]]),
TestCase(initialSize=3, slice=(-2, 2, 1), expectedAliveStates=[
[1, 1, 1], [0, 0, 1], [0, 0, 0]]),
TestCase(initialSize=4, slice=(-4, None, 2), expectedAliveStates=[
[1, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]),
TestCase(initialSize=5, slice=(-4, 4, 2), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]]), # noqa: E501
TestCase(initialSize=3, slice=(-2, 0, 1), expectedAliveStates=[
[1, 1, 1], [0, 0, 0]]),
# testcases for: start>0, stop<0, step>0
TestCase(initialSize=3, slice=(None, -1, 1), expectedAliveStates=[
[1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0]]),
TestCase(initialSize=4, slice=(1, -1, 1), expectedAliveStates=[
[1, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]),
TestCase(initialSize=5, slice=(None, -2, 2), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 1, 1, 1, 1], [0, 0, 0, 1, 1], [0, 0, 0, 0, 0]]), # noqa: E501
TestCase(initialSize=5, slice=(1, -1, 2), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]]), # noqa: E501
TestCase(initialSize=5, slice=(4, -5, 2), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]),
# testcases for: start>0, stop>0, step<0
TestCase(initialSize=3, slice=(None, None, -1), expectedAliveStates=[ # noqa: E501
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]),
TestCase(initialSize=3, slice=(2, None, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]),
TestCase(initialSize=3, slice=(None, 0, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [0, 1, 1], [0, 1, 1], [0, 0, 0]]),
TestCase(initialSize=6, slice=(3, 1, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1]]), # noqa: E501
TestCase(initialSize=5, slice=(1, 3, -1), expectedAliveStates=[
# ⚠️could be improved. Final state could be [0, 0, 1, 1, 1]
[1, 1, 1, 1, 1], [0, 0, 0, 0, 1]]),
# testcases for: start<0, stop>0, step<0
TestCase(initialSize=3, slice=(-1, None, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]),
TestCase(initialSize=3, slice=(-1, 0, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [0, 1, 1], [0, 1, 1], [0, 0, 0]]),
TestCase(initialSize=6, slice=(-2, None, -2), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]), # noqa: E501
TestCase(initialSize=6, slice=(-2, 1, -2), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]), # noqa: E501
TestCase(initialSize=6, slice=(-4, 4, -2), expectedAliveStates=[
[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]),
# testcases for: start>0, stop<0, step<0
TestCase(initialSize=3, slice=(None, -3, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [0, 1, 1], [0, 1, 1], [0, 0, 0]]),
TestCase(initialSize=3, slice=(None, -4, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]),
TestCase(initialSize=5, slice=(3, -4, -1), expectedAliveStates=[
# ⚠️could be improved, elements are only released on final step
[1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 0, 0, 0]]), # noqa: E501
TestCase(initialSize=5, slice=(1, -1, -1), expectedAliveStates=[
[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]),
]
# fmt: on
for index, testCase in enumerate(testCases):
with self.subTest(f"{index:02d}", testCase=testCase):
iterator = IteratorWithWeakReferences.FROM_SIZE(
testCase.initialSize
)
islice_iterator = mi.islice_extended(iterator, *testCase.slice)
aliveStates = []
refCountSupported or gc.collect()
# initial alive states
aliveStates.append(iterator.weakReferencesState())
while True:
try:
next(islice_iterator)
refCountSupported or gc.collect()
# intermediate alive states
aliveStates.append(iterator.weakReferencesState())
except StopIteration:
refCountSupported or gc.collect()
# final alive states
aliveStates.append(iterator.weakReferencesState())
break
self.assertEqual(aliveStates, testCase.expectedAliveStates)
| IsliceExtendedTests |
python | scipy__scipy | scipy/integrate/tests/test_cubature.py | {
"start": 31476,
"end": 32601
} | class ____:
"""
Tests related to the general Rule interface (currently private).
"""
@pytest.mark.parametrize("problem", [
(
# 2D problem, 1D rule
[0, 0],
[1, 1],
GaussKronrodQuadrature,
(21,),
),
(
# 1D problem, 2D rule
[0],
[1],
GenzMalikCubature,
(2,),
)
])
def test_incompatible_dimension_raises_error(self, problem, xp):
a, b, quadrature, quadrature_args = problem
rule = quadrature(*quadrature_args, xp=xp)
a = xp.asarray(a, dtype=xp.float64)
b = xp.asarray(b, dtype=xp.float64)
with pytest.raises(Exception, match="incompatible dimension"):
rule.estimate(basic_1d_integrand, a, b, args=(xp,))
def test_estimate_with_base_classes_raise_error(self, xp):
a = xp.asarray([0])
b = xp.asarray([1])
for base_class in [Rule(), FixedRule()]:
with pytest.raises(Exception):
base_class.estimate(basic_1d_integrand, a, b, args=(xp,))
| TestRules |
python | doocs__leetcode | solution/1900-1999/1968.Array With Elements Not Equal to Average of Neighbors/Solution.py | {
"start": 0,
"end": 304
} | class ____:
def rearrangeArray(self, nums: List[int]) -> List[int]:
nums.sort()
n = len(nums)
m = (n + 1) // 2
ans = []
for i in range(m):
ans.append(nums[i])
if i + m < n:
ans.append(nums[i + m])
return ans
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_timedelta.py | {
"start": 147,
"end": 1938
} | class ____:
def test_misc_coverage(self):
rng = timedelta_range("1 day", periods=5)
result = rng.groupby(rng.days)
assert isinstance(next(iter(result.values()))[0], Timedelta)
def test_map(self):
# test_map_dictlike generally tests
rng = timedelta_range("1 day", periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype=np.int64)
tm.assert_index_equal(result, exp)
def test_fields(self):
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
tm.assert_index_equal(rng.days, Index([1, 1], dtype=np.int64))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype=np.int32),
)
tm.assert_index_equal(
rng.microseconds,
Index([100 * 1000 + 123, 100 * 1000 + 123], dtype=np.int32),
)
tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype=np.int32))
msg = "'TimedeltaIndex' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=range(2)))
tm.assert_series_equal(
s.dt.seconds, Series([10 * 3600 + 11 * 60 + 12, np.nan], index=range(2))
)
# preserve name (GH15589)
rng.name = "name"
assert rng.days.name == "name"
| TestTimedeltaIndex |
python | python-pillow__Pillow | src/PIL/XVThumbImagePlugin.py | {
"start": 972,
"end": 2115
} | class ____(ImageFile.ImageFile):
format = "XVThumb"
format_description = "XV thumbnail image"
def _open(self) -> None:
# check magic
assert self.fp is not None
if not _accept(self.fp.read(6)):
msg = "not an XV thumbnail file"
raise SyntaxError(msg)
# Skip to beginning of next line
self.fp.readline()
# skip info comments
while True:
s = self.fp.readline()
if not s:
msg = "Unexpected EOF reading XV thumbnail file"
raise SyntaxError(msg)
if s[0] != 35: # ie. when not a comment: '#'
break
# parse header line (already read)
s = s.strip().split()
self._mode = "P"
self._size = int(s[0]), int(s[1])
self.palette = ImagePalette.raw("RGB", PALETTE)
self.tile = [
ImageFile._Tile("raw", (0, 0) + self.size, self.fp.tell(), self.mode)
]
# --------------------------------------------------------------------
Image.register_open(XVThumbImageFile.format, XVThumbImageFile, _accept)
| XVThumbImageFile |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 108560,
"end": 110231
} | class ____(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim**-0.5
self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, q, k, v):
batch_size, q_sequence_length, num_channels = q.shape
if not k.shape == v.shape:
raise ValueError(f"keys ({list(k.shape)}) and values ({list(v.shape)}) have different shapes!")
batch_size, k_sequence_length, num_channels = k.shape
q = self.q_proj(q).reshape(batch_size, q_sequence_length, self.num_heads, num_channels // self.num_heads)
k = self.k_proj(k).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads)
v = self.v_proj(v).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads)
attn = torch.einsum("bnkc,bmkc->bknm", q, k) * self.scale
attn = attn.softmax(dim=-1)
output = torch.einsum("bknm,bmkc->bnkc", attn, v).reshape(batch_size, q_sequence_length, num_channels)
output = self.proj(output)
output = self.proj_drop(output)
return output
| OneFormerTextMapperAttention |
python | jazzband__django-oauth-toolkit | tests/test_auth_backends.py | {
"start": 3483,
"end": 5881
} | class ____(BaseTest):
def dummy_get_response(self, request):
return HttpResponse()
def test_middleware_wrong_headers(self):
m = OAuth2TokenMiddleware(self.dummy_get_response)
request = self.factory.get("/a-resource")
m(request)
self.assertFalse(hasattr(request, "user"))
auth_headers = {
"HTTP_AUTHORIZATION": "Beerer " + "badstring", # a Beer token for you!
}
request = self.factory.get("/a-resource", **auth_headers)
m(request)
self.assertFalse(hasattr(request, "user"))
def test_middleware_user_is_set(self):
m = OAuth2TokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
request.user = self.user
m(request)
self.assertIs(request.user, self.user)
request.user = AnonymousUser()
m(request)
self.assertEqual(request.user.pk, self.user.pk)
def test_middleware_success(self):
m = OAuth2TokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
m(request)
self.assertEqual(request.user, self.user)
def test_middleware_response(self):
m = OAuth2TokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
response = m(request)
self.assertIsInstance(response, HttpResponse)
def test_middleware_response_header(self):
m = OAuth2TokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
response = m(request)
self.assertIn("Vary", response)
self.assertIn("Authorization", response["Vary"])
@override_settings(
AUTHENTICATION_BACKENDS=(
"oauth2_provider.backends.OAuth2Backend",
"django.contrib.auth.backends.ModelBackend",
),
)
@modify_settings(
MIDDLEWARE={
"append": "oauth2_provider.middleware.OAuth2TokenMiddleware",
}
)
| TestOAuth2Middleware |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 6776,
"end": 6955
} | class ____(scale_position_discrete):
"""
Discrete x position
"""
_aesthetics = ["x", "xmin", "xmax", "xend", "xintercept"]
@dataclass(kw_only=True)
| scale_x_discrete |
python | falconry__falcon | tests/test_testing.py | {
"start": 167,
"end": 7030
} | class ____:
def items(self):
return [('foo', 'bar'), ('baz', 'foo')]
def another_dummy_wsgi_app(environ, start_response):
start_response(status_codes.HTTP_OK, [('Content-Type', 'text/plain')])
yield b'It works!'
def test_testing_client_handles_wsgi_generator_app():
client = testing.TestClient(another_dummy_wsgi_app)
response = client.simulate_get('/nevermind')
assert response.status == status_codes.HTTP_OK
assert response.text == 'It works!'
@pytest.mark.parametrize(
'items',
[
(),
(b'1',),
(b'1', b'2'),
(b'Hello, ', b'World', b'!\n'),
],
)
def test_closed_wsgi_iterable(items):
assert tuple(testing.closed_wsgi_iterable(items)) == items
@pytest.mark.parametrize(
'version, valid',
[
('1', True),
('1.0', True),
('1.1', True),
('2', True),
('2.0', True),
('', False),
('0', False),
('1.2', False),
('2.1', False),
('3', False),
('3.1', False),
('11', False),
('22', False),
],
)
def test_simulate_request_http_version(version, valid):
app = App()
if valid:
testing.simulate_request(app, http_version=version)
else:
with pytest.raises(ValueError):
testing.simulate_request(app, http_version=version)
def test_simulate_request_content_type():
class Foo:
def on_post(self, req, resp):
resp.text = req.content_type
app = App()
app.add_route('/', Foo())
headers = {'Content-Type': falcon.MEDIA_TEXT}
result = testing.simulate_post(app, '/', headers=headers)
assert result.text == falcon.MEDIA_TEXT
result = testing.simulate_post(app, '/', content_type=falcon.MEDIA_HTML)
assert result.text == falcon.MEDIA_HTML
result = testing.simulate_post(
app, '/', content_type=falcon.MEDIA_HTML, headers=headers
)
assert result.text == falcon.MEDIA_HTML
result = testing.simulate_post(app, '/', json={})
assert result.text == falcon.MEDIA_JSON
result = testing.simulate_post(app, '/', json={}, content_type=falcon.MEDIA_HTML)
assert result.text == falcon.MEDIA_JSON
result = testing.simulate_post(app, '/', json={}, headers=headers)
assert result.text == falcon.MEDIA_JSON
result = testing.simulate_post(
app, '/', json={}, headers=headers, content_type=falcon.MEDIA_HTML
)
assert result.text == falcon.MEDIA_JSON
@pytest.mark.parametrize('mode', ['wsgi', 'asgi', 'asgi-stream'])
def test_content_type(util, mode):
class Responder:
def on_get(self, req, resp):
resp.content_type = req.content_type
app = util.create_app('asgi' in mode)
app.add_route('/', Responder())
if 'stream' in mode:
async def go():
async with testing.ASGIConductor(app) as ac:
async with ac.simulate_get_stream(
'/', content_type='my-content-type'
) as r:
assert r.content_type == 'my-content-type'
return 1
assert async_to_sync(go) == 1
else:
client = testing.TestClient(app)
res = client.simulate_get('/', content_type='foo-content')
assert res.content_type == 'foo-content'
@pytest.mark.parametrize('cookies', [{'foo': 'bar', 'baz': 'foo'}, CustomCookies()])
def test_create_environ_cookies(cookies):
environ = testing.create_environ(cookies=cookies)
assert environ['HTTP_COOKIE'] in ('foo=bar; baz=foo', 'baz=foo; foo=bar')
def test_create_environ_cookies_options_method():
environ = testing.create_environ(method='OPTIONS', cookies={'foo': 'bar'})
assert 'HTTP_COOKIE' not in environ
def test_cookies_jar():
class Foo:
def on_get(self, req, resp):
# NOTE(myusko): In the future we shouldn't change the cookie
# a test depends on the input.
# NOTE(kgriffs): This is the only test that uses a single
# cookie (vs. multiple) as input; if this input ever changes,
# a separate test will need to be added to explicitly verify
# this use case.
resp.set_cookie('has_permission', 'true')
def on_post(self, req, resp):
if req.cookies['has_permission'] == 'true':
resp.status = falcon.HTTP_200
else:
resp.status = falcon.HTTP_403
app = App()
app.add_route('/jars', Foo())
client = testing.TestClient(app)
response_one = client.simulate_get('/jars')
response_two = client.simulate_post('/jars', cookies=response_one.cookies)
assert response_two.status == falcon.HTTP_200
def test_create_environ_default_ua():
default_ua = 'falcon-client/' + falcon.__version__
environ = testing.create_environ()
assert environ['HTTP_USER_AGENT'] == default_ua
req = falcon.request.Request(environ)
assert req.user_agent == default_ua
def test_create_environ_default_ua_override():
ua = 'curl/7.64.1'
environ = testing.create_environ(headers={'user-agent': ua})
assert environ['HTTP_USER_AGENT'] == ua
req = falcon.request.Request(environ)
assert req.user_agent == ua
def test_create_environ_preserve_raw_uri():
uri = '/cache/http%3A%2F%2Ffalconframework.org/status'
environ = testing.create_environ(path=uri)
assert environ['PATH_INFO'] == '/cache/http://falconframework.org/status'
assert environ['RAW_URI'] == uri
def test_missing_header_is_none():
req = testing.create_req()
assert req.auth is None
@pytest.mark.parametrize(
'method', ['DELETE', 'GET', 'HEAD', 'LOCK', 'OPTIONS', 'PATCH', 'POST', 'PUT']
)
def test_client_simulate_aliases(asgi, method, util):
def capture_method(req, resp):
resp.content_type = falcon.MEDIA_TEXT
resp.text = req.method
app = util.create_app(asgi)
app.add_sink(capture_method)
client = testing.TestClient(app)
if method == 'LOCK':
result = client.request(method, '/')
else:
simulate_alias = getattr(client, method.lower())
result = simulate_alias('/')
assert result.status_code == 200
expected = '' if method == 'HEAD' else method
assert result.text == expected
def test_deprecated_httpnow():
with pytest.warns(
falcon.util.DeprecatedWarning, match='Use `falcon.util.http_now` instead.'
):
now = testing.httpnow()
assert now
def test_deprecated_redirected():
with pytest.warns(
falcon.util.DeprecatedWarning,
match='Please use contextlib.redirect_stdout and '
'contextlib.redirect_stderr instead.',
):
output = io.StringIO()
with testing.redirected(stdout=output):
print('test output')
assert output.getvalue() == 'test output\n'
| CustomCookies |
python | getsentry__sentry | src/sentry/new_migrations/monkey/state.py | {
"start": 275,
"end": 344
} | class ____(Enum):
MOVE_TO_PENDING = 0
DELETE = 1
| DeletionAction |
python | getlogbook__logbook | benchmark/bench_enabled_introspection.py | {
"start": 130,
"end": 347
} | class ____(NullHandler):
blackhole = False
def run():
with Flags(introspection=True):
with DummyHandler():
for _ in range(500):
log.warning("this is not handled")
| DummyHandler |
python | astropy__astropy | astropy/cosmology/_src/io/builtin/model.py | {
"start": 1867,
"end": 10627
} | class ____(FittableModel, Generic[_CosmoT]):
"""Base class for Cosmology redshift-method Models.
.. note::
This class is not publicly scoped so should not be used directly.
Instead, from a Cosmology instance use ``.to_format("astropy.model")``
to create an instance of a subclass of this class.
`_CosmologyModel` (subclasses) wrap a redshift-method of a
:class:`~astropy.cosmology.Cosmology` class, converting each non-`None`
|Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the redshift-method to the model's ``__call__ / evaluate``.
See Also
--------
astropy.cosmology.Cosmology.to_format
"""
@abc.abstractmethod
def _cosmology_class(self) -> type[_CosmoT]:
"""Cosmology class as a private attribute.
Set in subclasses.
"""
@abc.abstractmethod
def _method_name(self) -> str:
"""Cosmology method name as a private attribute.
Set in subclasses.
"""
@classproperty
def cosmology_class(cls) -> type[_CosmoT]:
"""|Cosmology| class."""
return cls._cosmology_class
@classproperty(lazy=True)
def _cosmology_class_sig(cls):
"""Signature of |Cosmology| class."""
return inspect.signature(cls._cosmology_class)
@property
def cosmology(self) -> _CosmoT:
"""Return |Cosmology| using `~astropy.modeling.Parameter` values."""
return self._cosmology_class(
name=self.name,
**{
k: (v.value if not (v := getattr(self, k)).unit else v.quantity)
for k in self.param_names
},
)
@classproperty
def method_name(self) -> str:
"""Redshift-method name on |Cosmology| instance."""
return self._method_name
# ---------------------------------------------------------------
# NOTE: cannot add type annotations b/c of how Model introspects
def evaluate(self, *args, **kwargs):
"""Evaluate method {method!r} of {cosmo_cls!r} Cosmology.
The Model wraps the :class:`~astropy.cosmology.Cosmology` method,
converting each |Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
(unless the Parameter is None, in which case it is skipped).
Here an instance of the cosmology is created using the current
Parameter values and the method is evaluated given the input.
Parameters
----------
*args, **kwargs
The first ``n_inputs`` of ``*args`` are for evaluating the method
of the cosmology. The remaining args and kwargs are passed to the
cosmology class constructor.
Any unspecified Cosmology Parameter use the current value of the
corresponding Model Parameter.
Returns
-------
Any
Results of evaluating the Cosmology method.
"""
# TODO: speed up using ``replace``
# create BoundArgument with all available inputs beyond the Parameters,
# which will be filled in next
ba = self._cosmology_class_sig.bind_partial(*args[self.n_inputs :], **kwargs)
# fill in missing Parameters
for k in self.param_names:
if k not in ba.arguments:
v = getattr(self, k)
ba.arguments[k] = v.value if not v.unit else v.quantity
# unvectorize, since Cosmology is not vectorized
# TODO! remove when vectorized
if np.shape(ba.arguments[k]): # only in __call__
# m_nu is a special case # TODO! fix by making it 'structured'
if k == "m_nu" and len(ba.arguments[k].shape) == 1:
continue
ba.arguments[k] = ba.arguments[k][0]
# make instance of cosmology
cosmo = self._cosmology_class(**ba.arguments)
# evaluate method
return getattr(cosmo, self._method_name)(*args[: self.n_inputs])
##############################################################################
def from_model(model: _CosmologyModel[_CosmoT]) -> _CosmoT:
"""Load |Cosmology| from `~astropy.modeling.Model` object.
Parameters
----------
model : `_CosmologyModel` subclass instance
See ``Cosmology.to_format.help("astropy.model") for details.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
>>> from astropy.cosmology import Cosmology, Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> print(Cosmology.from_format(model))
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
cosmo = model.cosmology
# assemble the metadata
meta = copy.deepcopy(model.meta)
for n in model.param_names:
p = getattr(model, n)
meta[p.name] = {
n: getattr(p, n)
for n in dir(p)
if not (n.startswith("_") or callable(getattr(p, n)))
}
return replace(cosmo, meta=meta)
def to_model(cosmology: _CosmoT, *_: object, method: str) -> _CosmologyModel[_CosmoT]:
    """Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology` subclass instance
        The cosmology whose bound ``method`` will back the model's ``evaluate``.
    *_ : object
        Not used; accepted for writer-signature compatibility.
    method : str, keyword-only
        The name of the method on the ``cosmology``.

    Returns
    -------
    `_CosmologyModel` subclass instance
        The Model wraps the |Cosmology| method, converting each non-`None`
        :class:`~astropy.cosmology.Parameter` to a
        :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
        and the method to the model's ``__call__ / evaluate``.

    Raises
    ------
    AttributeError
        If ``method`` is not an attribute of ``cosmology``.
    ValueError
        If the ``method`` attribute exists but is not callable.

    Examples
    --------
    >>> from astropy.cosmology import Planck18
    >>> model = Planck18.to_format("astropy.model", method="lookback_time")
    >>> model
    <FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966,
    Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897,
    name='Planck18')>
    """
    cosmo_cls = cosmology.__class__
    # get bound method & sig from cosmology (unbound if class).
    if not hasattr(cosmology, method):
        raise AttributeError(f"{method} is not a method on {cosmology.__class__}.")
    func = getattr(cosmology, method)
    if not callable(func):
        raise ValueError(f"{cosmology.__class__}.{method} is not callable.")
    msig = inspect.signature(func)
    # introspect for number of positional inputs, ignoring "self"
    # (parameter kinds 0 and 1 are POSITIONAL_ONLY and POSITIONAL_OR_KEYWORD).
    n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))])
    attrs = {}  # class attributes
    attrs["_cosmology_class"] = cosmo_cls
    attrs["_method_name"] = method
    attrs["n_inputs"] = n_inputs
    attrs["n_outputs"] = 1  # the wrapped method yields a single output
    # Convert each set (non-None) cosmology Parameter into a Model Parameter,
    # carrying over any per-parameter metadata stored on the cosmology.
    params = {
        k: convert_parameter_to_model_parameter(
            cosmo_cls.parameters[k], v, meta=cosmology.meta.get(k)
        )
        for k, v in cosmology.parameters.items()
        if v is not None
    }
    # class name is cosmology name + Cosmology + method name + Model
    clsname = (
        cosmo_cls.__qualname__.replace(".", "_")
        + "Cosmology"
        + method.replace("_", " ").title().replace(" ", "")
        + "Model"
    )
    # make Model class
    CosmoModel = type(clsname, (_CosmologyModel,), {**attrs, **params})
    # override __signature__ and format the doc.
    CosmoModel.evaluate.__signature__ = msig
    if CosmoModel.evaluate.__doc__ is not None:
        # guard against PYTHONOPTIMIZE mode
        CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format(
            cosmo_cls=cosmo_cls.__qualname__, method=method
        )
    # instantiate class using default values
    return CosmoModel(
        **cosmology.parameters, name=cosmology.name, meta=copy.deepcopy(cosmology.meta)
    )
def model_identify(
origin: str, format: str | None, *args: object, **kwargs: object
) -> bool:
"""Identify if object uses the :class:`~astropy.modeling.Model` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Model) and (format in (None, "astropy.model"))
return itis
# ===================================================================
# Register
# Hook the converters into the Cosmology I/O registry so that
# ``Cosmology.from_format(..., format="astropy.model")`` and
# ``Cosmology.to_format("astropy.model", ...)`` dispatch to the
# functions defined above.
convert_registry.register_reader("astropy.model", Cosmology, from_model)
convert_registry.register_writer("astropy.model", Cosmology, to_model)
convert_registry.register_identifier("astropy.model", Cosmology, model_identify)
| _CosmologyModel |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/pooling_ops_test.py | {
"start": 6210,
"end": 97295
} | class ____(test.TestCase, parameterized.TestCase):
def _isMaxPool(self, func):
  """Returns True iff `func` is one of the 2-D max-pooling entry points."""
  max_pool_funcs = (nn_ops.max_pool, nn_ops.max_pool_v2)
  return func in max_pool_funcs
def _VerifyOneType(
    self,
    pool_func,
    input_sizes,
    ksize,
    strides,
    padding,
    data_format,
    data_type,
    expected,
    use_gpu,
    v2,
    use_negative_input=False,
    bfloat16_rtol=1e-2,
):
  """Verifies the output values of the pooling function for one dtype.

  Skips (rather than fails) when the requested configuration is not
  supported by the available hardware or by the op variant under test.

  Args:
    pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua
      version.
    input_sizes: Input tensor dimensions.
    ksize: The kernel size dimensions
    strides: The stride dimensions
    padding: Padding type.
    data_format: The data format we use to run the pooling operation.
    data_type: The data type to use to run the pooling operation.
    expected: An array containing the expected operation outputs.
    use_gpu: Whether we are running on GPU.
    v2: Whether to use v2 version.
    use_negative_input: If the input values should be negative.
    bfloat16_rtol: relative tolerance for bfloat16.
  """
  # Check that this test is compatible with the hardware we have. (Really
  # this should be done in GetTestConfigsDicts(), but when that runs, we
  # haven't initialized enough of TF to know what our hardware is!)
  if use_gpu and not test.is_gpu_available():
    self.skipTest("No GPU is available.")
  if use_gpu and data_type == dtypes.float64 and test.is_built_with_rocm():
    self.skipTest("ROCm pooling ops don't support float64.")
  if use_gpu and data_format == "NCHW_VECT_C" and not test.is_gpu_available(
      cuda_only=True, min_cuda_compute_capability=(6, 1)):
    self.skipTest("NCHW_VECT_C requires sm61+.")
  if v2 and data_format != "NHWC":
    self.skipTest("v2 not supported for %s" % data_format)
  if v2 and not isinstance(padding, str):
    self.skipTest("non-constant ksize/strides requires nonexplicit padding")
  if data_format == "NCHW_VECT_C":
    if data_type != dtypes.float32:
      self.skipTest("quantization to qint8 not implemented for %r" %
                    data_type)
    if input_sizes[-1] % 4 != 0:
      self.skipTest("Skipping test for depth %d" % input_sizes[-1])
  total_size = 1
  for s in input_sizes:
    total_size *= s
  tf_logging.info("Running %s test. %r %r %d %r %r %r %s", data_format, v2,
                  input_sizes, total_size, pool_func, ksize, strides,
                  data_type)
  # Initializes the input tensor with array containing incrementing
  # numbers from 1, wrapping round to -127 after 127 to support int8.
  y = -1 if use_negative_input else 1
  x = [(((f + 128) % 255) - 127)*y for f in range(total_size)]
  with self.cached_session(use_gpu=use_gpu):
    t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
    if data_format in ("NCHW", "NCHW_VECT_C", "NCW"):
      # Channels-first formats: transpose input and the ksize/strides/padding
      # specs to match; NCHW_VECT_C additionally quantizes to qint8.
      if data_format == "NCHW_VECT_C":
        t = test_util.NHWCToNCHW_VECT_C(t)
        t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      else:
        t = test_util.NHWCToNCHW(t)
      ksize = test_util.NHWCToNCHW(ksize)
      strides = test_util.NHWCToNCHW(strides)
      if isinstance(padding, list):
        padding = test_util.NHWCToNCHW(padding)
    ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
    strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
    if v2:
      # v2 path feeds ksize/strides as tensors rather than attrs.
      t = pool_func(
          t,
          ksize=ksize_placeholder,
          strides=strides_placeholder,
          padding=padding,
          data_format=data_format)
    else:
      t = pool_func(
          t,
          ksize=ksize,
          strides=strides,
          padding=padding,
          data_format=data_format)
    if data_format == "NCHW_VECT_C":
      # Undo quantization/transposition so results compare in NHWC float.
      t = gen_array_ops.dequantize(t, -128, 127)
      t = test_util.NCHW_VECT_CToNHWC(t)
    elif data_format == "NCHW":
      t = test_util.NCHWToNHWC(t)
    if v2:
      actual = t.eval(feed_dict={
          ksize_placeholder: ksize,
          strides_placeholder: strides
      })
    else:
      actual = self.evaluate(t)
    self.assertShapeEqual(actual, t)
    self.assertAllCloseAccordingToType(
        expected, actual.flatten(), bfloat16_rtol=bfloat16_rtol
    )
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
                   data_format, expected, use_gpu, v2,
                   use_negative_input=False):
  """Verifies the output values of the pooling function over all dtypes.

  Dispatches to `_VerifyOneType` for float32 and, where the platform
  supports them, float64 and float16.

  Args:
    pool_func: Function to be called, co.MaxPool, co.AvgPool,
      or the Lua version.
    input_sizes: Input tensor dimensions.
    ksize: The kernel size dimensions
    strides: The stride dimensions
    padding: Padding type.
    data_format: The data format we use to run the pooling operation.
    expected: An array containing the expected operation outputs.
    use_gpu: Whether we are running on GPU.
    v2: Whether to use v2 version.
    use_negative_input: If the input values should be negative.
  """
  if data_format == "NCHW_VECT_C":
    # NCHW_VECT_C only supports a subset of ops/paddings; bail out early.
    avg_pool_func = nn_ops.avg_pool
    tf_logging.info("pool_func=%s", pool_func)
    if pool_func == avg_pool_func:
      tf_logging.info("NCHW_VECT_C not yet implemented for avg_pool")
      return
    if (self._isMaxPool(pool_func) and isinstance(padding, list)):
      tf_logging.info("NCHW_VECT_C not yet implemented for max pool" +
                      " with explicit padding")
      return
  self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                      data_format, dtypes.float32, expected, use_gpu, v2,
                      use_negative_input)
  if not test.is_built_with_rocm():
    # double datatype is not supported for pooling ops on the ROCm platform
    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                        data_format, dtypes.float64, expected, use_gpu, v2,
                        use_negative_input)
  if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():
    # float16 only when running on CPU or a GPU with half-precision support.
    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                        data_format, dtypes.float16, expected, use_gpu, v2,
                        use_negative_input)
def _VerifyValues(self,
                  pool_func,
                  input_sizes,
                  ksize,
                  strides,
                  padding,
                  expected,
                  use_gpu,
                  v2=False,
                  one_dim=False,
                  use_negative_input=False):
  """Verifies the output values of the pooling function.

  Runs `_VerifyOneTest` for every data format (including NCHW_VECT_C)
  whose GPU requirement matches `use_gpu`.

  Args:
    pool_func: Function to be called, co.MaxPool, co.AvgPool,
      or the Lua version.
    input_sizes: Input tensor dimensions.
    ksize: The kernel size dimensions
    strides: The stride dimensions
    padding: Padding type.
    expected: An array containing the expected operation outputs.
    use_gpu: Whether we are running on GPU.
    v2: Whether to use v2 version.
    one_dim: If one dimensional pools should be done instead of two
      dimensional pools.
    use_negative_input: If the input values should be negative.
  """
  for (data_format, use_gpu_2) in GetTestConfigs(
      include_nchw_vect_c=True, one_dimensional=one_dim):
    if use_gpu_2 == use_gpu:
      self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
                          data_format, expected, use_gpu, v2,
                          use_negative_input)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolValidPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolEmpty(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 0],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=[],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[8.5, 9.5, 10.5, 14.5, 15.5, 16.5],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindow(self, **kwargs):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyOneType(
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindow_2(self, **kwargs):
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyOneType(
input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, **kwargs):
self._VerifyOneType(
input_sizes=[2, 2, 2, 2],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,
14.0, 15.0, 15.0, 16.0
],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindowMultiBatch_2(self, **kwargs):
self._VerifyOneType(
input_sizes=[2, 2, 2, 2],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,
13.0, 14.0, 15.0, 16.0
],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolValidPaddingUnevenStride(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolValidPaddingUnevenStride_2(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePadding_2(self, **kwargs):
expected_output = [
11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyOneType(
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingPacket_4(self, **kwargs):
expected_output = [
21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyOneType(
input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingPacket_8(self, **kwargs):
expected_output = [
-12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, 4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, -3.5, -54.0, -53.0, -52.0,
-51.0, -50.0, -49.0, -48.0, -47.0, -38.0, -37.0, -36.0, -35.0, -34.0,
-33.0, -32.0, -31.0, -22.0, -21.0, -20.0, -19.0, -18.0, -17.0, -16.0,
-15.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -11.0, -10.0,
-9.0, -8.0, -7.0, -6.0, -5.0, -4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
12.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 33.0, 34.0, 35.0,
36.0, 37.0, 38.0, -3.5, -2.5, -85.0, -84.0, -83.0, -82.0, -81.0, -80.0,
-79.0, -78.0, -69.0, -68.0, -67.0, -66.0, -65.0, -64.0, -63.0, -62.0,
-53.0, -52.0, -51.0, -50.0, -49.0, -48.0, -47.0, -46.0, -41.0, -40.0,
-39.0, -38.0, -37.0, -36.0, -35.0, -34.0
]
self._VerifyOneType(
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
bfloat16_rtol=3e-2,
**kwargs,
)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolEmptyInput(self, **kwargs):
self._VerifyOneType(
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
**kwargs)
@test_util.run_in_graph_and_eager_modes
def testRawAvgPoolLargeKsizeRaiseError(self):
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
with self.cached_session():
t = gen_nn_ops.avg_pool(
value=np.ones([1, 1, 1, 1]),
ksize=[1, 9223372036854775807, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC")
self.evaluate(t)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolValidPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=[13.0, 14.0, 15.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[13.0, 14.0, 15.0, 16.0, 17.0, 18.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolZeroExplicitPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 0], [0, 0], [0, 0]],
expected=[9.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolNegativeInputExpPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [2, 1], [2, 1], [0, 0]],
expected=[-1, -1, -1, -1],
use_negative_input=True,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPadding(self, **kwargs):
expected_output = [9.0, 9.0]
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 2], [0, 1], [0, 0]],
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPaddingAdvanced(self, **kwargs):
expected_output = [7, 9, 11, 12, 19, 21, 23, 24, 31, 33, 35, 36, 31, 33,
35, 36]
self._VerifyOneType(
input_sizes=[1, 6, 6, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [1, 2], [2, 1], [0, 0]],
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolNegativeInputExpPaddingAdv(self, **kwargs):
expected_output = [-1, -1, -3, -5, -7, -7, -9, -11, -19, -19, -21, -23, -31,
-31, -33, -35]
self._VerifyOneType(
input_sizes=[1, 6, 6, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [1, 2], [2, 1], [0, 0]],
expected=expected_output,
use_negative_input=True,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPadding2_(self, **kwargs):
expected_output = [9.0, 9.0]
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 2], [0, 1], [0, 0]],
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool1d, nn_ops.max_pool_v2, one_dimensional=True))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPadding_1D(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 1],
ksize=[1, 2, 1],
strides=[1, 2, 1],
padding=[[0, 0], [0, 1], [0, 0]],
expected=[2.0, 3.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePaddingNonSquareWindow(self, **kwargs):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyOneType(
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolValidPaddingUnevenStride(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolValidPaddingUnevenStride2_(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePaddingPacket4_(self, **kwargs):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyOneType(
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePaddingPacket8_(self, **kwargs):
expected_output = [
81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 97.0, 98.0, 99.0, 100.0,
101.0, 102.0, 103.0, 104.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0,
119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 120.0,
18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 34.0, 35.0, 36.0, 37.0,
38.0, 39.0, 40.0, 41.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0,
58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 82.0, 83.0, 84.0, 85.0,
86.0, 87.0, 88.0, 89.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0,
105.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0,
123.0, 124.0, 125.0, 126.0, 127.0, 120.0, 121.0, -45.0, -44.0, -43.0,
-42.0, -41.0, -40.0, -39.0, -38.0, -29.0, -28.0, -27.0, -26.0, -25.0,
-24.0, -23.0, -22.0, -13.0, -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0,
-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0
]
self._VerifyOneType(
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolEmptyInput(self, **kwargs):
self._VerifyOneType(
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolInvalidFilterSize(self, **kwargs):
with self.cached_session(use_gpu=test.is_gpu_available()):
t = constant_op.constant(1.0, shape=[1, 1, 1, 1])
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, ValueError),
"Negative dimension size"):
t = self.evaluate(
nn_ops.max_pool(t, ksize=[1, 1, 2, 1], strides=1, padding="VALID"))
@test_util.run_in_graph_and_eager_modes
def testMaxPoolWithArgmaxKsizeOverflow(self):
with self.assertRaisesRegex(
(ValueError, errors_impl.InvalidArgumentError),
"ksize must be a positive int32 value",
):
with self.cached_session():
t = gen_nn_ops.max_pool_with_argmax(
input=[[[[1, 1, 1]]]],
ksize=[1, -2**31, 4, 1],
strides=[1, 1000, 1, 7],
padding="SAME")
self.evaluate(t)
# Tests for DepthwiseMaxPooling on CPU only.
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))
@test_util.run_deprecated_v1
def testDepthwiseMaxPool1x1DepthWindow(self, **kwargs):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyOneType(
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))
@test_util.run_deprecated_v1
def testDepthwiseMaxPool2x2DepthWindow(self, **kwargs):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyOneType(
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))
@test_util.run_deprecated_v1
def testMaxPoolKernelSmallerThanStrideValid(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolKernelSmallerThanStride(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[5, 8, 26, 29],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2) +
GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testKernelSmallerThanStrideSame1_(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2) +
GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testKernelSmallerThanStrideSame2_(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11],
**kwargs)
def _testDepthwiseMaxPoolInvalidConfig(self,
                                       in_size,
                                       ksize,
                                       strides,
                                       error_msg,
                                       use_gpu=False):
  """Asserts an invalid depthwise max-pool config raises UnimplementedError.

  Args:
    in_size: Input tensor shape.
    ksize: The kernel size dimensions.
    strides: The stride dimensions.
    error_msg: Regex expected to match the UnimplementedError message.
    use_gpu: Whether to run on GPU.
  """
  with self.cached_session(use_gpu=use_gpu):
    t = constant_op.constant(1.0, shape=in_size)
    with self.assertRaisesRegex(errors_impl.UnimplementedError, error_msg):
      t = nn_ops.max_pool(
          t, ksize=ksize, strides=strides, padding="SAME").eval()
@test_util.disable_xla("b/123338077")  # Passes with XLA
def testDepthwiseMaxPoolInvalidConfigs(self):
  """Checks rejection of unsupported depthwise max-pool configurations."""
  # Pooling across depth cannot be combined with spatial pooling.
  self._testDepthwiseMaxPoolInvalidConfig(
      [1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],
      "exactly one of pooling across depth")
  # The depth window must equal the depth stride.
  self._testDepthwiseMaxPoolInvalidConfig(
      [1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],
      "depth window to equal the depth stride")
  # The depth window must evenly divide the input depth.
  self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
                                          [1, 1, 1, 3], "evenly divide")
  if test.is_gpu_available():
    with self.session():
      t = variables.Variable(np.ones([1, 2, 2, 4]))
      self.evaluate(variables.global_variables_initializer())
      # Depthwise max pooling is only implemented for CPU devices.
      with self.assertRaisesOpError("for CPU devices"):
        nn_ops.max_pool(
            t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
            padding="SAME").eval()
# The following are tests that verify that the CPU and GPU implementations
# produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
  """Checks max_pool_with_argmax output against plain (CPU) max_pool.

  Runs MaxPoolWithArgmax on the default device and MaxPool on the CPU with
  the same random input, and asserts the forward outputs match.

  Args:
    input_shape: Input tensor dimensions.
    ksize: The kernel size dimensions.
    strides: The stride dimensions.
    padding: Padding type.
  """
  dtypes_to_test = [np.float32, np.float16]
  # double datatype is currently not supported for pooling ops
  # on the ROCm platform.
  # NOTE: the previous `[a, b] + [c] if cond else []` expression grouped as
  # `([a, b] + [c]) if cond else []`, so on ROCm *no* dtype was tested.
  if not test.is_built_with_rocm():
    dtypes_to_test.append(np.float64)
  for dtype in dtypes_to_test:
    tensor_input = np.random.rand(*input_shape).astype(dtype)
    with self.cached_session():
      t = constant_op.constant(tensor_input, shape=input_shape)
      out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
      gpu_val = self.evaluate(out_op)
    with self.cached_session(use_gpu=False):
      t = constant_op.constant(tensor_input, shape=input_shape)
      out_op = nn_ops.max_pool(t, ksize, strides, padding)
      cpu_val = self.evaluate(out_op)
    self.assertAllCloseAccordingToType(cpu_val, gpu_val)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
                         padding):
  """Checks MaxPoolGradWithArgmax against the CPU MaxPoolGrad kernel.

  Args:
    input_shape: Input tensor dimensions.
    output_shape: Pooled output dimensions (shape of the incoming gradient).
    ksize: The kernel size dimensions.
    strides: The stride dimensions.
    padding: Padding type.
  """
  dtypes_to_test = [np.float32, np.float16, dtypes.bfloat16.as_numpy_dtype]
  # double datatype is currently not supported for pooling ops
  # on the ROCm platform.
  # NOTE: the previous `[...] + [...] if cond else []` expression grouped so
  # that *no* dtype was tested on ROCm.
  if not test.is_built_with_rocm():
    dtypes_to_test.append(np.float64)
  for dtype in dtypes_to_test:
    # Generate numbers in a narrow range, so that there are many duplicates
    # in the input. randint(0, 4) replaces the removed
    # np.random.random_integers(0, 3) and draws the same values 0..3.
    tensor_input = np.random.randint(0, 4, input_shape).astype(dtype)
    tensor_output = np.random.rand(*output_shape).astype(dtype)
    with self.cached_session():
      t = constant_op.constant(tensor_input, shape=input_shape)
      _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
      argmax = self.evaluate(argmax_op)
      grad_in = constant_op.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
                                                    strides, padding)
      gpu_val = self.evaluate(out_op)
      self.assertShapeEqual(gpu_val, out_op)
    with self.cached_session(use_gpu=False):
      t = constant_op.constant(tensor_input, shape=input_shape)
      out_op = nn_ops.max_pool(t, ksize, strides, padding)
      orig_out = self.evaluate(out_op)
      grad_in = constant_op.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,
                                        padding)
      cpu_val = self.evaluate(out_op)
      self.assertShapeEqual(cpu_val, out_op)
    # The CPU version accumulates its gradient on fp16, so it's less
    # accurate than the GPU version that does the accumulation on fp32
    self.assertAllCloseAccordingToType(
        cpu_val,
        gpu_val,
        half_rtol=0.01,
        half_atol=0.01,
        bfloat16_rtol=0.02,
        bfloat16_atol=0.1)
def _CompareMaxPoolingGradBk(self, input_shape, output_shape, ksize, strides,
                             padding):
  """Checks MaxPoolGradGradWithArgmax against the CPU MaxPoolGradGrad kernel.

  Args:
    input_shape: Input tensor dimensions.
    output_shape: Pooled output tensor dimensions.
    ksize: The kernel size dimensions.
    strides: The stride dimensions.
    padding: Padding type.
  """
  dtypes_to_test = [np.float32, np.float16, dtypes.bfloat16.as_numpy_dtype]
  # double datatype is currently not supported for pooling ops
  # on the ROCm platform.
  # NOTE: the previous `[...] + [...] if cond else []` expression grouped so
  # that *no* dtype was tested on ROCm.
  if not test.is_built_with_rocm():
    dtypes_to_test.append(np.float64)
  for dtype in dtypes_to_test:
    # Generate numbers in a narrow range, so that there are many duplicates
    # in the input. randint(0, 4) replaces the removed
    # np.random.random_integers(0, 3) and draws the same values 0..3.
    tensor_input = np.random.randint(0, 4, input_shape).astype(dtype)
    with self.cached_session(use_gpu=False):
      t = constant_op.constant(tensor_input, shape=input_shape)
      _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
      argmax = self.evaluate(argmax_op)
      grad_in = constant_op.constant(tensor_input, shape=input_shape)
      out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
          t, grad_in, argmax, ksize, strides, padding)
      gpu_val = self.evaluate(out_op)
      self.assertShapeEqual(gpu_val, out_op)
    with self.cached_session(use_gpu=False):
      t = constant_op.constant(tensor_input, shape=input_shape)
      out_op = nn_ops.max_pool(t, ksize, strides, padding)
      orig_out = self.evaluate(out_op)
      grad_in = constant_op.constant(tensor_input, shape=input_shape)
      out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,
                                             strides, padding)
      cpu_val = self.evaluate(out_op)
      self.assertShapeEqual(cpu_val, out_op)
    # The CPU version accumulates its gradient on fp16, so it's less
    # accurate than the GPU version that does the accumulation on fp32
    self.assertAllCloseAccordingToType(
        cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def testMaxPoolingWithArgmax(self):
  """Checks MaxPoolWithArgmax outputs and flattened argmax indices.

  Exercises both Targmax dtypes (int32/int64), CPU and GPU, and the
  `include_batch_in_index` flag, which offsets argmax indices by the
  flattened batch position.
  """
  tensor_input = [
      1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
      0.0, 1.0, 0.0, 1.0
  ]
  Config = collections.namedtuple(
      "Config", ["use_gpu", "include_batch_in_index", "argmax", "Targmax"])
  configs = [
      Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int64),
      Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int64),
      Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int32),
      Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int32),
      Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int64),
      Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int64),
  ]
  for config in configs:
    with GetDeviceScope(self, use_gpu=config.use_gpu):
      t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])
      out_op, argmax_op = nn_ops.max_pool_with_argmax(
          t,
          ksize=[1, 2, 2, 1],
          strides=[1, 1, 1, 1],
          Targmax=config.Targmax,
          padding="VALID",
          include_batch_in_index=config.include_batch_in_index)
      out, argmax = self.evaluate([out_op, argmax_op])
      self.assertShapeEqual(out, out_op)
      self.assertShapeEqual(argmax, argmax_op)
      # Every 2x2 window over this 3x3 input contains at least one 1.0.
      self.assertAllClose(out.ravel(),
                          [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
      self.assertAllEqual(argmax.ravel(), config.argmax)
def testDepthwiseMaxPoolingWithArgmax(self):
tensor_input = [89, 73, -109]
Config = collections.namedtuple("Config", ["use_gpu", "padding"])
configs = [
Config(False, "SAME"),
Config(False, "VALID"),
Config(True, "SAME"),
Config(True, "VALID"),
]
for config in configs:
with GetDeviceScope(self, use_gpu=config.use_gpu):
t = constant_op.constant(tensor_input, shape=[1, 1, 1, 3])
out_op, argmax_op = nn_ops.max_pool_with_argmax(
t,
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding=config.padding,
)
out, argmax = self.evaluate([out_op, argmax_op])
# TODO(b/259733542): Fix below asserts once bug is fixed.
# self.assertShapeEqual(out, out_op)
# self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(), [89, 73, -109])
self.assertAllClose(argmax.ravel(), [0, 1, 2])
  def testMaxPoolingGradWithArgmax(self):
    """MaxPoolGradWithArgmax: backprop values land on the argmax positions."""
    orig_input = [
        1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
        0.0, 1.0, 0.0, 1.0
    ]
    tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
    Config = collections.namedtuple(
        "Config", ["use_gpu", "include_batch_in_index", "argmax"])
    # argmax indices are per-image unless include_batch_in_index=True, in
    # which case the second image's indices are offset by 9 elements.
    configs = [
        Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8]),
        Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17]),
        Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
        Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
    ]
    for config in configs:
      with GetDeviceScope(self, config.use_gpu):
        orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
        t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
        argmax_t = constant_op.constant(
            config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
        out_op = gen_nn_ops.max_pool_grad_with_argmax(
            orig_in,
            t,
            argmax_t,
            ksize=[1, 2, 2, 1],
            strides=[1, 1, 1, 1],
            padding="VALID",
            include_batch_in_index=config.include_batch_in_index)
        out = self.evaluate(out_op).flatten()
        # Non-argmax positions receive zero gradient.
        self.assertAllClose(out, [
            11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0, 21.0, 0.0, 22.0,
            0.0, 0.0, 0.0, 23.0, 0.0, 24.0
        ])
def testMaxPoolingGradThrowDeterminismError(self):
if test.is_gpu_available(cuda_only=True):
try:
config_exec.enable_op_determinism()
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0,
0.0, 0.0, 1.0, 0.0, 1.0
]
tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
with GetDeviceScope(self, True):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
argmax_t = constant_op.constant(
[0, 1, 3, 5, 0, 2, 6, 8], shape=[2, 2, 2, 1], dtype=dtypes.int64)
with self.assertRaisesRegex(
errors_impl.UnimplementedError, "Determinism is not yet supported"
" for MaxPoolGradWithArgmax."):
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=False)
self.evaluate(out_op)
finally:
config_exec.disable_op_determinism()
else:
try:
config_exec.enable_op_determinism()
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0,
0.0, 0.0, 1.0, 0.0, 1.0
]
tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
with GetDeviceScope(self, False):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
argmax_t = constant_op.constant(
[0, 1, 3, 5, 0, 2, 6, 8], shape=[2, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=False)
self.evaluate(out_op)
finally:
config_exec.disable_op_determinism()
  def testMaxPoolingGradGradWithArgmax(self):
    """MaxPoolGradGradWithArgmax: gathers grad values at argmax positions."""
    # MaxPoolWithArgMax is implemented only on CUDA.
    if not test.is_gpu_available(cuda_only=True):
      return
    orig_input = [
        1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
        0.0, 1.0, 0.0, 1.0
    ]
    tensor_input = [
        11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 21.0, 22.0, 23.0,
        24.0, 25.0, 26.0, 27.0, 28.0, 29.0
    ]
    Config = collections.namedtuple(
        "Config", ["use_gpu", "include_batch_in_index", "argmax"])
    configs = [
        Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
        Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
    ]
    for config in configs:
      with GetDeviceScope(self, config.use_gpu):
        orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
        t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])
        argmax_t = constant_op.constant(
            config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
        out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
            orig_in,
            t,
            argmax_t,
            ksize=[1, 2, 2, 1],
            strides=[1, 1, 1, 1],
            padding="VALID",
            include_batch_in_index=config.include_batch_in_index)
        out = self.evaluate(out_op).flatten()
        # Output is the grad value at each window's argmax position.
        self.assertAllClose(out,
                            [11.0, 12.0, 14.0, 16.0, 21.0, 23.0, 27.0, 29.0])
  def _ConstructAndTestGradient(self,
                                pool_func,
                                input_sizes,
                                output_sizes,
                                window_rows,
                                window_cols,
                                row_stride,
                                col_stride,
                                padding,
                                data_format,
                                use_gpu,
                                x_init_value=None):
    """Verifies the gradients of the max or avg pooling function.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool,
        or the Lua version.
      input_sizes: Input tensor dimensions.
      output_sizes: Output tensor dimensions.
      window_rows: kernel size in row dim
      window_cols: kernel size in col dim
      row_stride: Row Stride.
      col_stride: Col Stride.
      padding: Padding type.
      data_format: Data format.
      use_gpu: whether we are running on GPU
      x_init_value: Values to be passed to the gradient checker.
    """
    # Batch and depth are never pooled over, so they must match.
    assert input_sizes[0] == output_sizes[0]
    assert input_sizes[3] == output_sizes[3]
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.cached_session(use_gpu=use_gpu):
      input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
      if pool_func == nn_ops.avg_pool:
        func_name = "avg_pool"
        err_tolerance = 1e-4
      else:
        # Distinct values make max-pool gradients unambiguous (unique argmax
        # in every window), so a tighter check would be flaky otherwise.
        if x_init_value is None:
          x_init_value = np.asarray(
              np.arange(1, total_size + 1),
              dtype=np.float32).reshape(input_sizes)
        func_name = "max_pool"
        err_tolerance = 1e-3
      if data_format == "NCHW":
        # Run the op in NCHW, but keep the gradient comparison in NHWC by
        # converting the input in and the output back out.
        ksize = [1, 1, window_rows, window_cols]
        strides = [1, 1, row_stride, col_stride]
        if isinstance(padding, list):
          padding = test_util.NHWCToNCHW(padding)
        t = test_util.NHWCToNCHW(input_tensor)
      else:
        ksize = [1, window_rows, window_cols, 1]
        strides = [1, row_stride, col_stride, 1]
        t = input_tensor
      t = pool_func(
          t,
          ksize=ksize,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name=func_name)
      if data_format == "NCHW":
        t = test_util.NCHWToNHWC(t)
      err = gradient_checker.compute_gradient_error(
          input_tensor,
          input_sizes,
          t,
          output_sizes,
          x_init_value=x_init_value,
          delta=1e-2)
    tf_logging.info("%s gradient error = %.4f" % (func_name, err))
    self.assertLess(err, err_tolerance)
def _ConstructAndTestSecondGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the second-order gradients of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-3
else:
if x_init_value is None:
x_init_value = np.asarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-2
if data_format == "NCHW":
ksize = [1, 1, window_rows, window_rows]
strides = [1, 1, row_stride, col_stride]
t = test_util.NHWCToNCHW(input_tensor)
else:
ksize = [1, window_rows, window_rows, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
t = test_util.NHWCToNCHW(t)
t_g = gradients_impl.gradients(t**2, input_tensor)[0]
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t_g,
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s second-order gradient error = %.4f" % (func_name, err))
self.assertLess(err, err_tolerance)
  # The helpers below are thin parameterizations of _ConstructAndTestGradient
  # for first-order MaxPool/MaxPoolV2 gradients.  Name suffixes encode
  # window/stride (and, where present, the input spatial size), e.g.
  # 2_1_6 = 2x2 window, stride 1, 6x6 input.
  def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 3, 3, 1],
          output_sizes=[1, 3, 3, 1],
          window_rows=1,
          window_cols=1,
          row_stride=1,
          col_stride=1,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 6, 6, 3],
          output_sizes=[2, 5, 5, 3],
          window_rows=2,
          window_cols=2,
          row_stride=1,
          col_stride=1,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 7, 7, 3],
          output_sizes=[2, 6, 6, 3],
          window_rows=2,
          window_cols=2,
          row_stride=1,
          col_stride=1,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 3, 3, 1],
          output_sizes=[1, 2, 2, 1],
          window_rows=1,
          window_cols=1,
          row_stride=2,
          col_stride=2,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 2, 2, 3],
          output_sizes=[2, 1, 1, 3],
          window_rows=2,
          window_cols=2,
          row_stride=2,
          col_stride=2,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 2, 4, 3],
          window_rows=1,
          window_cols=1,
          row_stride=1,
          col_stride=1,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 1, 2, 3],
          window_rows=1,
          window_cols=1,
          row_stride=2,
          col_stride=2,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 2, 4, 3],
          window_rows=2,
          window_cols=2,
          row_stride=1,
          col_stride=1,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 1, 2, 3],
          window_rows=2,
          window_cols=2,
          row_stride=2,
          col_stride=2,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 7, 7, 1],
          output_sizes=[1, 7, 7, 1],
          window_rows=3,
          window_cols=3,
          row_stride=1,
          col_stride=1,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  # Explicit (list-form) padding is only supported by nn_ops.max_pool here,
  # hence the single-element pool_func list below.
  def _testMaxPoolExplicitPadding_1(self, data_format, use_gpu):
    for pool_func in [nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 7, 7, 1],
          output_sizes=[1, 7, 7, 1],
          window_rows=3,
          window_cols=3,
          row_stride=1,
          col_stride=1,
          padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolExplicitPadding_2(self, data_format, use_gpu):
    for pool_func in [nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 7, 7, 1],
          output_sizes=[1, 6, 8, 1],
          window_rows=3,
          window_cols=5,
          row_stride=1,
          col_stride=1,
          padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolExplicitPaddingLeftGreater(self, data_format, use_gpu):
    for pool_func in [nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 7, 7, 1],
          output_sizes=[1, 6, 8, 1],
          window_rows=3,
          window_cols=5,
          row_stride=1,
          col_stride=1,
          padding=[[0, 0], [0, 1], [3, 2], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolExplicitPaddingBatchChannel(self, data_format, use_gpu):
    for pool_func in [nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[4, 7, 7, 3],
          output_sizes=[4, 6, 8, 3],
          window_rows=3,
          window_cols=5,
          row_stride=1,
          col_stride=1,
          padding=[[0, 0], [0, 1], [3, 2], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolExplicitPaddingStrides(self, data_format, use_gpu):
    for pool_func in [nn_ops.max_pool]:
      self._ConstructAndTestGradient(
          pool_func,
          input_sizes=[1, 7, 7, 1],
          output_sizes=[1, 4, 3, 1],
          window_rows=3,
          window_cols=3,
          row_stride=2,
          col_stride=3,
          padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradValidPadding1_2(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding3_1(data_format, use_gpu)
self._testMaxPoolExplicitPadding_1(data_format, use_gpu)
self._testMaxPoolExplicitPadding_2(data_format, use_gpu)
self._testMaxPoolExplicitPaddingStrides(data_format, use_gpu)
self._testMaxPoolExplicitPaddingLeftGreater(data_format, use_gpu)
self._testMaxPoolExplicitPaddingBatchChannel(data_format, use_gpu)
  def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
                   window_cols, row_stride, col_stride, padding, v2):
    """Max Pooling Gradient.

    Args:
      orig_input: A float Tensor. The original input tensor.
      orig_output: A float Tensor. The original output tensor.
      grad: A float Tensor.
        The 4D (batch x rows x cols x depth) output backprop.
      window_rows: integer. Kernel size along rows dimension.
      window_cols: integer. Kernel size along cols dimension.
      row_stride: integer. Stride along rows dimension
      col_stride: integer. Stride along cols dimension
      padding: PoolingOpDef.Padding. Padding type.
      v2: bool. If True dispatch to MaxPoolGradV2; otherwise MaxPoolGrad,
        converting any explicit padding list via nn_ops.convert_padding.

    Returns:
      A Tensor.
    """
    pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad
    if v2:
      return pool_func(orig_input, orig_output, grad,
                       [1, window_rows, window_cols, 1],
                       [1, row_stride, col_stride, 1], padding)
    else:
      # MaxPoolGrad takes padding as an enum plus a separate explicit
      # paddings list, so split a list-form padding here.
      padding, explicit_paddings = nn_ops.convert_padding(padding)
      return pool_func(orig_input, orig_output, grad,
                       [1, window_rows, window_cols, 1],
                       [1, row_stride, col_stride, 1], padding,
                       explicit_paddings)
  def _testMaxPoolGradDirect(self, input_data, output_backprop,
                             expected_input_backprop, input_sizes, output_sizes,
                             window_rows, window_cols, row_stride, col_stride,
                             padding, use_gpu, v2):
    """Checks max-pool input backprop against a hand-computed table."""
    pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool
    with self.cached_session(use_gpu=use_gpu):
      input_tensor = variables.Variable(
          np.array(input_data, dtype=np.float32).reshape(input_sizes))
      self.evaluate(variables.global_variables_initializer())
      output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1],
                                [1, row_stride, col_stride, 1], padding)
      output_backprop_tensor = constant_op.constant(
          output_backprop, shape=output_sizes)
      input_backprop_tensor = self._MaxPoolGrad(
          input_tensor, output_tensor, output_backprop_tensor, window_rows,
          window_cols, row_stride, col_stride, padding, v2)
      actual_input_backprop = self.evaluate(input_backprop_tensor)
      self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
      actual_input_backprop = actual_input_backprop.flatten()
      actual_input_backprop = self._GetNdArray(actual_input_backprop)
      # NOTE(review): the forward output is evaluated but never compared to
      # an expected value; presumably this just forces execution of the
      # forward op — confirm before removing.
      actual_output = self.evaluate(output_tensor).flatten()
      actual_output = self._GetNdArray(actual_output)
      self.assertAllClose(
          expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6)
  # Direct max-pool gradient cases with hand-computed backprop tables.
  # Backprop values land on each window's argmax position; positions that
  # are the argmax of several overlapping windows accumulate their values.
  def _testMaxPoolGradDirect1_1(self):
    input_data = [
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0
    ]
    output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
    # All-equal inputs: each window's first element wins the tie.
    expected_input_backprop = [
        11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
        0.0, 0.0, 0.0, 0.0
    ]
    for use_gpu in True, False:
      for v2 in [True, False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 3, 3, 1],
            window_rows=2,
            window_cols=2,
            row_stride=1,
            col_stride=1,
            padding="VALID",
            use_gpu=use_gpu,
            v2=v2)
  def _testMaxPoolGradDirect1_2(self):
    # Checkerboard input: some positions are the argmax of multiple windows,
    # so their backprop values sum (e.g. 25.0 = 12.0 + 13.0).
    input_data = [
        1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
        0.0, 1.0
    ]
    output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
    expected_input_backprop = [
        11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
        0.0, 0.0, 0.0
    ]
    for use_gpu in True, False:
      for v2 in [True, False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 3, 3, 1],
            window_rows=2,
            window_cols=2,
            row_stride=1,
            col_stride=1,
            padding="VALID",
            use_gpu=use_gpu,
            v2=v2)
  def _testMaxPoolGradDirect1_3(self):
    # SAME padding with a 3x3 window: every output position exists, so the
    # accumulated sums at argmax positions are larger.
    input_data = [
        1.0,
        0.0,
        1.0,
        0.0,
        0.0,
        1.0,
        0.0,
        1.0,
        1.0,
        0.0,
        1.0,
        0.0,
        0.0,
        1.0,
        0.0,
        1.0,
    ]
    output_backprop = [
        11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,
        23.0, 24.0, 25.0, 26.0
    ]
    expected_input_backprop = [
        54,
        0.0,
        62,
        0.0,
        0.0,
        60,
        0.0,
        22.0,
        47,
        0.0,
        51,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
    ]
    for use_gpu in True, False:
      for v2 in [True, False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 4, 4, 1],
            window_rows=3,
            window_cols=3,
            row_stride=1,
            col_stride=1,
            padding="SAME",
            use_gpu=use_gpu,
            v2=v2)
  def _testMaxPoolGradZeroExplicitPadding(self):
    # All-zero explicit padding must behave exactly like "VALID"
    # (compare with _testMaxPoolGradDirect1_2).
    input_data = [
        1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
        0.0, 1.0
    ]
    output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
    expected_input_backprop = [
        11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
        0.0, 0.0, 0.0
    ]
    for use_gpu in True, False:
      for v2 in [False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 3, 3, 1],
            window_rows=2,
            window_cols=2,
            row_stride=1,
            col_stride=1,
            padding=[[0, 0], [0, 0], [0, 0], [0, 0]],
            use_gpu=use_gpu,
            v2=v2)
  def _testMaxPoolGradExplicitPadding_1(self):
    input_data = [
        1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
        0.0, 1.0
    ]
    output_backprop = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
                       20.0, 21.0, 22.0]
    expected_input_backprop = [
        11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 49.0, 19.0, 0.0, 41.0, 0.0, 0.0,
        0.0, 0.0, 22.0
    ]
    for use_gpu in True, False:
      for v2 in [False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 3, 4, 1],
            window_rows=2,
            window_cols=2,
            row_stride=1,
            col_stride=1,
            padding=[[0, 0], [0, 0], [0, 1], [0, 0]],
            use_gpu=use_gpu,
            v2=v2)
  def _testMaxPoolGradExplicitPadding_2(self):
    input_data = [
        1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
        0.0, 1.0
    ]
    output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
    expected_input_backprop = [
        54.0, 0.0, 30.0, 0.0, 0.0, 0.0, 0.0, 0.0, 39.0, 0.0, 21.0, 0.0, 0.0,
        0.0, 0.0, 0.0
    ]
    for use_gpu in True, False:
      for v2 in [False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 3, 3, 1],
            window_rows=3,
            window_cols=3,
            row_stride=2,
            col_stride=2,
            padding=[[0, 0], [2, 1], [2, 1], [0, 0]],
            use_gpu=use_gpu,
            v2=v2)
  def _testMaxPoolGradExplicitPadding_3(self):
    # Negative inputs: padded positions must not win the max even though
    # zero-padding would be larger than every input value.
    input_data = [
        -1.0, -5.0, -1.0, -5.0, -5.0, -1.0, -5.0, -1.0, -1.0, -5.0, -1.0, -5.0,
        -5.0, -1.0, -5.0, -1.0
    ]
    output_backprop = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
                       20.0, 21.0, 22.0]
    expected_input_backprop = [
        11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 49.0, 19.0, 0.0, 41.0, 0.0, 0.0,
        0.0, 0.0, 22.0
    ]
    for use_gpu in True, False:
      for v2 in [False]:
        self._testMaxPoolGradDirect(
            input_data,
            output_backprop,
            expected_input_backprop,
            input_sizes=[1, 4, 4, 1],
            output_sizes=[1, 3, 4, 1],
            window_rows=2,
            window_cols=2,
            row_stride=1,
            col_stride=1,
            padding=[[0, 0], [0, 0], [0, 1], [0, 0]],
            use_gpu=use_gpu,
            v2=v2)
@test_util.no_xla_auto_jit("b/123923733") # NaNs handled differently
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
# The functionality associated with TF_ENABLE_NANPROP is currently
# not supported on the ROCm platform, so skip this part of the test
# NANs in input lead to non-deterministic results, and hence skipping
# the remaining tests altogether on the ROCm platform
if test.is_built_with_rocm():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
@test_util.no_xla_auto_jit("b/123923733") # NaNs handled differently
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
float("nan"), 12.0, 13.0, 15.0,
float("nan"), 17.0, 19.0, 20.0,
float("nan")
]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
float("nan"), 12.0, 13.0, 0.0, 15.0,
float("nan"), 17.0, 0.0, 19.0, 20.0,
float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
# The functionality associated with TF_ENABLE_NANPROP is currently
# not supported on the ROCm platform, so skip this part of the test
# NANs in input lead to non-deterministic results, and hence skipping
# the remaining tests altogether on the ROCm platform
if test.is_built_with_rocm():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
@test_util.run_deprecated_v1
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
self._testMaxPoolGradZeroExplicitPadding()
self._testMaxPoolGradExplicitPadding_1()
self._testMaxPoolGradExplicitPadding_2()
self._testMaxPoolGradExplicitPadding_3()
  # Second-order max-pool gradient parameterizations of
  # _ConstructAndTestSecondGradient; suffixes encode window/stride (and
  # input size), mirroring the first-order _testMaxPoolGrad* helpers.
  def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[1, 3, 3, 1],
          output_sizes=[1, 3, 3, 1],
          window_rows=1,
          window_cols=1,
          row_stride=1,
          col_stride=1,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[2, 6, 6, 3],
          output_sizes=[2, 5, 5, 3],
          window_rows=2,
          window_cols=2,
          row_stride=1,
          col_stride=1,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[2, 7, 7, 3],
          output_sizes=[2, 6, 6, 3],
          window_rows=2,
          window_cols=2,
          row_stride=1,
          col_stride=1,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[2, 2, 2, 3],
          output_sizes=[2, 1, 1, 3],
          window_rows=2,
          window_cols=2,
          row_stride=2,
          col_stride=2,
          padding="VALID",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 2, 4, 3],
          window_rows=1,
          window_cols=1,
          row_stride=1,
          col_stride=1,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 2, 4, 3],
          window_rows=2,
          window_cols=2,
          row_stride=1,
          col_stride=1,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[2, 2, 4, 3],
          output_sizes=[2, 1, 2, 3],
          window_rows=2,
          window_cols=2,
          row_stride=2,
          col_stride=2,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
  def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu):
    for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
      self._ConstructAndTestSecondGradient(
          pool_func,
          input_sizes=[1, 7, 7, 1],
          output_sizes=[1, 7, 7, 1],
          window_rows=3,
          window_cols=3,
          row_stride=1,
          col_stride=1,
          padding="SAME",
          data_format=data_format,
          use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPoolGradGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding3_1(data_format, use_gpu)
  def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows,
                       window_cols, row_stride, col_stride, padding):
    """Max Pooling Second-Order Gradient.

    Args:
      orig_input: A float Tensor. The original input tensor.
      orig_output: A float Tensor. The original output tensor.
      grad: A float Tensor.
        The 4D (batch x out_rows x out_cols x depth) output backprop.
      window_rows: integer. Kernel size along rows dimension.
      window_cols: integer. Kernel size along cols dimension.
      row_stride: integer. Stride along rows dimension
      col_stride: integer. Stride along cols dimension
      padding: PoolingOpDef.Padding. Padding type.

    Returns:
      A Tensor.
    """
    # Thin wrapper over the raw MaxPoolGradGrad kernel with NHWC-ordered
    # ksize/strides.
    return gen_nn_ops.max_pool_grad_grad(
        orig_input, orig_output, grad, [1, window_rows, window_cols, 1],
        [1, row_stride, col_stride, 1], padding)
@test_util.run_deprecated_v1
def testAvgPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testAvgPoolGradValidPadding1_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding1_2(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding3_1(data_format, use_gpu)
  # Avg-pool gradient parameterizations of _ConstructAndTestGradient;
  # suffixes encode window size and stride (e.g. 2_1 = 2x2 window, stride 1).
  def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 3, 3, 3],
        output_sizes=[2, 3, 3, 3],
        window_rows=1,
        window_cols=1,
        row_stride=1,
        col_stride=1,
        padding="VALID",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradValidPadding1_2(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 3, 3, 3],
        output_sizes=[2, 2, 2, 3],
        window_rows=1,
        window_cols=1,
        row_stride=2,
        col_stride=2,
        padding="VALID",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 3, 3, 3],
        output_sizes=[2, 2, 2, 3],
        window_rows=2,
        window_cols=2,
        row_stride=1,
        col_stride=1,
        padding="VALID",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 2, 3],
        output_sizes=[2, 1, 1, 3],
        window_rows=2,
        window_cols=2,
        row_stride=2,
        col_stride=2,
        padding="VALID",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 2, 4, 3],
        window_rows=1,
        window_cols=1,
        row_stride=1,
        col_stride=1,
        padding="SAME",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradSamePadding1_2(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 1, 2, 3],
        window_rows=1,
        window_cols=1,
        row_stride=2,
        col_stride=2,
        padding="SAME",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 2, 4, 3],
        window_rows=2,
        window_cols=2,
        row_stride=1,
        col_stride=1,
        padding="SAME",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 1, 2, 3],
        window_rows=2,
        window_cols=2,
        row_stride=2,
        col_stride=2,
        padding="SAME",
        data_format=data_format,
        use_gpu=use_gpu)
  def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool,
        input_sizes=[1, 7, 7, 1],
        output_sizes=[1, 7, 7, 1],
        window_rows=3,
        window_cols=3,
        row_stride=1,
        col_stride=1,
        padding="SAME",
        data_format=data_format,
        use_gpu=use_gpu)
  @test_util.disable_xla("Xla does not raise error on out of bounds access")
  def testAvgPoolGradOutputMemoryOutOfBounds(self):
    """AvgPoolGrad must raise when the grad shape is inconsistent.

    The grad tensor passed here has batch size 3 while orig_input_shape
    declares batch size 1; the kernel must report the mismatch instead of
    reading or writing out of bounds.
    """
    with self.assertRaisesRegex(
        errors_impl.InvalidArgumentError,
        (
            # CPU error message
            "(Output only has 3 elements but computation requested would use"
            " element with index=6"
            ")|("
            # GPU error message
            r"Expected grad shape to be \[1,1,3,1\], but got \[3,1,3,1\])"
        ),
    ):
      self.evaluate(
          gen_nn_ops.AvgPoolGrad(
              orig_input_shape=[1, 1, 3, 1],
              grad=[
                  [[[1.0], [2.0], [3.0]]],
                  [[[4.0], [5.0], [6.0]]],
                  [[[7.0], [8.0], [9.0]]],
              ],
              ksize=[1, 1, 1, 1],
              strides=[1, 1, 1, 2],
              padding="VALID",
              data_format="NHWC",
          )
      )
  @test_util.run_deprecated_v1
  def testShapeFunctionEdgeCases(self):
    """Shape inference: fully-unknown inputs and rank-mismatched inputs."""
    # All shapes unknown: inference should still produce a rank-4 result
    # with every dimension undetermined.
    for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
      p = pool_func(
          array_ops.placeholder(dtypes.float32),
          ksize=[1, 1, 1, 1],
          strides=[1, 1, 1, 1],
          padding="SAME")
      self.assertEqual([None, None, None, None], p.get_shape().as_list())
    # max_pool_with_argmax returns both the pooled values and the argmax
    # indices; both outputs get the same unknown rank-4 shape.
    p, am = nn_ops.max_pool_with_argmax(
        array_ops.placeholder(dtypes.float32),
        ksize=[1, 1, 1, 1],
        strides=[1, 1, 1, 1],
        padding="SAME")
    self.assertEqual([None, None, None, None], p.get_shape().as_list())
    self.assertEqual([None, None, None, None], am.get_shape().as_list())
    # Incorrect input shape: a rank-2 input must be rejected at graph
    # construction time.
    for pool_func in [
        nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax
    ]:
      with self.assertRaises(ValueError):
        pool_func(
            array_ops.placeholder(dtypes.float32, shape=[1, 3]),
            ksize=[1, 1, 1, 1],
            strides=[1, 1, 1, 1],
            padding="SAME")
  @test_util.run_deprecated_v1
  @test_util.disable_xla("b/123337890")  # Error messages differ
  def testOpEdgeCases(self):
    """Invalid ksize/strides combinations raise the documented errors."""
    with self.session(use_gpu=test.is_gpu_available()) as sess:
      pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool]
      # max_pool_with_argmax has a CUDA-only kernel, so only test it when a
      # GPU is present.
      if test.is_gpu_available():
        pool_funcs.append(nn_ops.max_pool_with_argmax)
      for pool_func in pool_funcs:
        if pool_func != nn_ops.max_pool:
          # Illegal strides: striding across the batch dimension is
          # unimplemented for these ops.
          with self.assertRaisesRegex(
              errors_impl.UnimplementedError,
              "Pooling is not yet supported on the batch"):
            sess.run(
                pool_func(
                    array_ops.placeholder(dtypes.float32),
                    ksize=[1, 1, 1, 1],
                    strides=[2, 1, 1, 1],
                    padding="SAME"))
        # Filter too large: a window bigger than the input with VALID
        # padding yields a negative output dimension at construction time.
        with self.assertRaisesRegex(ValueError, "Negative dimension size"):
          sess.run(
              pool_func(
                  array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
                  ksize=[1, 20, 21, 1],
                  strides=[1, 1, 1, 1],
                  padding="VALID"))
        # Same check with rows/cols of the oversized window swapped; here the
        # error already fires when building the op, without running it.
        with self.assertRaisesRegex(ValueError, "Negative dimension size"):
          pool_func(
              array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
              ksize=[1, 21, 20, 1],
              strides=[1, 1, 1, 1],
              padding="VALID")
  @test_util.run_deprecated_v1
  def testEdgeCasesRaiseErrors(self):
    """Explicit padding is rejected for unsupported formats, ranks and ops."""
    # NCHW_VECT_C layout does not support explicit padding.
    with self.assertRaisesRegex(
        ValueError, "NCHW_VECT_C.*is not supported with "
        "explicit padding|XLA does not support pooling ops with explicit "
        "padding"):
      nn_ops.max_pool(
          array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1]),
          ksize=[1, 2, 2, 1],
          strides=[1, 2, 2, 1],
          padding=[[0, 0], [0, 1], [0, 1], [0, 0]],
          data_format="NCHW_VECT_C")
    # Explicit padding is only defined for rank-4 inputs.
    with self.assertRaisesRegex(
        ValueError, "Explicit padding is not supported with an input "
        "tensor of rank 5"):
      nn_ops.max_pool_v2(
          array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1, 1]),
          ksize=[1, 2, 2, 1, 1],
          strides=[1, 2, 2, 1, 1],
          padding=[[0, 0], [0, 1], [0, 1], [0, 0]],
          data_format="NCHW")
    # The raw MaxPoolV2 op does not accept the "EXPLICIT" padding attr at all.
    with self.assertRaisesRegex(
        ValueError, "Attr 'padding' of 'MaxPoolV2' Op passed "
        "string 'EXPLICIT'"):
      gen_nn_ops.max_pool_v2(
          array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1, 1]),
          ksize=[1, 2, 2, 1, 1],
          strides=[1, 2, 2, 1, 1],
          padding="EXPLICIT",
          data_format="NHWC")
  @test_util.run_deprecated_v1
  def testEdgeCasesExcessPadding(self):
    """Explicit padding >= the window size on one side must be rejected."""
    with self.session(use_gpu=test.is_gpu_available()) as sess:
      with self.assertRaisesRegex(
          (errors_impl.UnimplementedError, errors_impl.InvalidArgumentError),
          "Right padding 2 needs to be smaller than the window size 2|"
          "XLA does not support pooling ops with explicit padding"):
        input_sizes = [1, 3, 3, 1]
        # Deterministic input values mapped into [-127, 127].
        x = [(((f + 128) % 255) - 127) for f in range(9)]
        t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
        # explicit_paddings right padding (third pair, second entry) is 2,
        # which equals the 2-wide window and is therefore invalid.
        sess.run(gen_nn_ops.max_pool(
            t,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding="EXPLICIT",
            explicit_paddings=[0, 0, 0, 1, 0, 2, 0, 0],
            data_format="NHWC"))
  @test_util.run_deprecated_v1
  def testNegativePadding(self):
    """Negative entries in explicit_paddings must be rejected."""
    with self.session(use_gpu=test.is_gpu_available()) as sess:
      with self.assertRaisesRegex(
          ValueError, "All elements of explicit_paddings must be "
          "nonnegative for"):
        input_sizes = [1, 3, 3, 1]
        # Deterministic input values mapped into [-127, 127].
        x = [(((f + 128) % 255) - 127) for f in range(9)]
        t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
        sess.run(gen_nn_ops.max_pool(
            t,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding="EXPLICIT",
            explicit_paddings=[0, 0, -1, -1, -1, -1, 0, 0],
            data_format="NHWC"))
  @test_util.run_deprecated_v1
  def testExplicitPaddingBatch(self):
    """Nonzero explicit padding on batch/depth dimensions must be rejected."""
    with self.session(use_gpu=test.is_gpu_available()) as sess:
      with self.assertRaisesRegex(
          ValueError, "Nonzero explicit padding in the batch or depth "
          "dimensions is not supported"):
        input_sizes = [1, 3, 3, 1]
        # Deterministic input values mapped into [-127, 127].
        x = [(((f + 128) % 255) - 127) for f in range(9)]
        t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
        # First pair pads the batch dimension, which is disallowed.
        sess.run(gen_nn_ops.max_pool(
            t,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding="EXPLICIT",
            explicit_paddings=[1, 1, 1, 1, 1, 1, 0, 0],
            data_format="NHWC"))
  @test_util.disable_xla(
      "b/205634417")  # XLA is not throwing shape errors for multiple *Grad ops.
  def testMaxPoolGradEagerShapeErrors(self):
    """Eager MaxPoolGrad/MaxPoolGradGrad reject mismatched tensor shapes."""
    with context.eager_mode():
      orig_in = array_ops.ones((1, 1, 1, 1))
      # Test invalid orig_out shape: it must match the forward output shape
      # implied by orig_in and the pooling parameters.
      orig_out = array_ops.ones((1, 1, 1, 2))
      grad = array_ops.ones((1, 1, 1, 1))
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          r"Expected orig_output shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
        gen_nn_ops.max_pool_grad(
            orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
            padding="VALID")
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          r"Expected orig_output shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
        gen_nn_ops.max_pool_grad_grad(
            orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
            padding="VALID")
      # Test invalid grad shape: the incoming gradient must match the
      # forward output shape as well.
      orig_out = array_ops.ones((1, 1, 1, 1))
      grad = array_ops.ones((1, 1, 1, 2))
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
        gen_nn_ops.max_pool_grad(
            orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
            padding="VALID")
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
        gen_nn_ops.max_pool_grad_grad(
            orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
            padding="VALID")
  def testMaxPoolGradWithArgmaxEagerShapeErrors(self):
    """Eager MaxPoolGrad(Grad)WithArgmax reject mismatched grad/argmax shapes."""
    with context.eager_mode():
      inp = array_ops.ones((1, 1, 1, 1))
      # Test invalid grad shape: must match the forward output shape.
      grad = array_ops.ones((1, 1, 1, 2))
      argmax = array_ops.zeros((1, 1, 1, 1), dtype=dtypes.int64)
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
        gen_nn_ops.max_pool_grad_with_argmax(
            inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
            padding="VALID")
      # max_pool_grad_grad_with_argmax is only implemented for GPUs
      if test.is_gpu_available():
        with self.assertRaisesRegex(
            errors_impl.InvalidArgumentError,
            r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
          gen_nn_ops.max_pool_grad_grad_with_argmax(
              inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
              padding="VALID")
      # Test invalid argmax shape: must also match the forward output shape.
      grad = array_ops.ones((1, 1, 1, 1))
      argmax = array_ops.ones((1, 1, 1, 2), dtype=dtypes.int64)
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          r"Expected argmax shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
        gen_nn_ops.max_pool_grad_with_argmax(
            inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
            padding="VALID")
      # max_pool_grad_grad_with_argmax is only implemented for GPUs
      if test.is_gpu_available():
        with self.assertRaisesRegex(
            errors_impl.InvalidArgumentError,
            r"Expected argmax shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
          gen_nn_ops.max_pool_grad_grad_with_argmax(
              inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
              padding="VALID")
  def testAvgPoolGradInvalidInputShapeRaiseError(self):
    """AvgPoolGrad with a negative orig_input_shape must raise, not crash."""
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      with self.cached_session():
        # All four entries of orig_input_shape are a large negative value.
        orig_input_shape = constant_op.constant(
            -536870912, shape=[4], dtype=dtypes.int32)
        grad = constant_op.constant(
            .0890338004362538, shape=[1, 5, 7, 1], dtype=dtypes.float64)
        t = gen_nn_ops.AvgPoolGrad(
            orig_input_shape=orig_input_shape,
            grad=grad,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding="VALID",
            data_format="NHWC")
        self.evaluate(t)
  def testAvgPoolGradInvalidStrideRaiseErrorProperly(self):
    """AvgPoolGrad with strides inconsistent with the grad shape must raise."""
    with self.assertRaises(errors_impl.InvalidArgumentError):
      with self.cached_session():
        orig_input_shape = [11, 9, 78, 9]
        grad = constant_op.constant(
            0.1, shape=[16, 16, 16, 16], dtype=dtypes.float64)
        # Depth stride of 30 plus a grad shape that does not correspond to
        # pooling the declared input; the kernel must report the error
        # instead of misbehaving.
        t = gen_nn_ops.AvgPoolGrad(
            orig_input_shape=orig_input_shape,
            grad=grad,
            ksize=[1, 40, 128, 1],
            strides=[1, 128, 128, 30],
            padding="SAME",
            data_format="NHWC")
        self.evaluate(t)
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
  """Return a test method comparing forward max pooling for the given shapes."""

  def Test(self):
    # MaxPoolWithArgMax is implemented only on CUDA; skip otherwise.
    if test.is_gpu_available(cuda_only=True):
      self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)

  return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
  """Return a test method checking max-pool gradients for the given shapes."""

  def Test(self):
    # MaxPoolWithArgMax is implemented only on CUDA; skip otherwise.
    if test.is_gpu_available(cuda_only=True):
      self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,
                                padding)

  return Test
def GetMaxPoolGradGradTest(input_size, filter_size, output_size, strides,
                           padding):
  """Return a test method checking second-order max-pool gradients."""

  def Test(self):
    # MaxPoolWithArgMax is implemented only on CUDA; skip otherwise.
    if test.is_gpu_available(cuda_only=True):
      self._CompareMaxPoolingGradBk(input_size, output_size, filter_size,
                                    strides, padding)

  return Test
if __name__ == "__main__":
  # For every shrunken Inception pooling shape, attach one forward, one
  # gradient, and one second-order gradient max-pool test to PoolingTest so
  # the test runner discovers them by name.
  for (name_, input_size_, filter_size_, output_size_, stride_,
       padding_) in GetShrunkInceptionMaxPoolShapes():
    setattr(PoolingTest, "testMaxPoolFwd_" + name_,
            GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
    setattr(PoolingTest, "testMaxPoolGrad_" + name_,
            GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_,
                               padding_))
    setattr(PoolingTest, "testMaxPoolGradGrad_" + name_,
            GetMaxPoolGradGradTest(input_size_, filter_size_, output_size_,
                                   stride_, padding_))
  test.main()
| PoolingTest |
python | psf__black | tests/data/cases/fmtonoff5.py | {
"start": 2810,
"end": 3253
} | class ____:
async def call(param):
if param:
# fmt: off
if param[0:4] in (
"ABCD", "EFGH"
) :
# fmt: on
print ( "This won't be formatted" )
elif param[0:4] in ("ZZZZ",):
print ( "This won't be formatted either" )
print("This will be formatted")
# Regression test for https://github.com/psf/black/issues/2985.
| A |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v2_utils.py | {
"start": 30723,
"end": 38542
} | class ____(_Optimizer):
"""Optimization parameters for FTRL with TPU embeddings.
See Algorithm 1 of this
[paper](https://research.google.com/pubs/archive/41159.pdf).
Pass this to `tf.tpu.experimental.embedding.TPUEmbedding` via the `optimizer`
argument to set the global optimizer and its parameters:
```python
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
...
optimizer=tf.tpu.experimental.embedding.FTRL(0.1))
```
This can also be used in a `tf.tpu.experimental.embedding.TableConfig` as the
optimizer parameter to set a table specific optimizer. This will override the
optimizer and parameters for global embedding optimizer defined above:
```python
table_one = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...,
optimizer=tf.tpu.experimental.embedding.FTRL(0.2))
table_two = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...)
feature_config = (
tf.tpu.experimental.embedding.FeatureConfig(
table=table_one),
tf.tpu.experimental.embedding.FeatureConfig(
table=table_two))
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
feature_config=feature_config,
batch_size=...
optimizer=tf.tpu.experimental.embedding.FTRL(0.1))
```
In the above example, the first feature will be looked up in a table that has
a learning rate of 0.2 while the second feature will be looked up in a table
that has a learning rate of 0.1.
See 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for a
complete description of these parameters and their impacts on the optimizer
algorithm.
"""
def __init__(
self,
learning_rate: Union[float, Callable[[], float]] = 0.001,
learning_rate_power: float = -0.5,
l1_regularization_strength: float = 0.0,
l2_regularization_strength: float = 0.0,
beta: float = 0.0,
initial_accumulator_value: float = 0.1,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
slot_variable_creation_fn: Optional[SlotVarCreationFnType] = None,
clipvalue: Optional[ClipValueType] = None,
multiply_linear_by_learning_rate: bool = False,
allow_zero_accumulator: bool = False,
low_dimensional_packing_status: bool = False,
):
"""Optimization parameters for Adagrad.
Args:
learning_rate: The learning rate. It should be a floating point value or a
callable taking no arguments for a dynamic learning rate.
learning_rate_power: A float value, must be less or equal to zero.
Controls how the learning rate decreases during training. Use zero for a
fixed learning rate.
l1_regularization_strength: A float value, must be greater than or equal
to zero.
l2_regularization_strength: A float value, must be greater than or equal
to zero.
beta: A float value, representing the beta value from the paper.
initial_accumulator_value: The starting value for accumulators. Only zero
or positive values are allowed.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
slot_variable_creation_fn: If you wish do directly control the creation of
the slot variables, set this to a callable taking three parameters: a
table variable, a list of slot names to create for it, and a list of
initializers. This function should return a dict with the slot names as
keys and the created variables as values with types matching the table
variable. When set to None (the default), uses the built-in variable
creation.
clipvalue: Controls clipping of the gradient. Set to either a single
positive scalar value to get clipping or a tuple of scalar values (min,
max) to set a separate maximum or minimum. If one of the two entries is
None, then there will be no clipping that direction.
multiply_linear_by_learning_rate: If set to True, a modified formula is
used for FTRL that treats the "linear" accumulator as being
pre-multiplied by the learning rate (i.e., the accumulator named
"linear" actually stores "linear * learning_rate"). Other than
checkpoint compatibility, this is mathematically equivalent for a static
learning rate; for a dynamic learning rate, it is nearly the same as
long as the learning rate does not change quickly. The benefit of this
is that the modified formula handles zero and near-zero learning rates
without producing NaNs, improving flexibility for learning rate ramp-up.
allow_zero_accumulator: If set to True, changes some internal formulas to
allow zero and near-zero accumulator values at the cost of some
performance; this only needs to be set if you are using an initial
accumulator value of zero, which is uncommon.
low_dimensional_packing_status: Status of the low-dimensional embedding
packing optimization controls whether to optimize the packing of
1-dimensional, 2-dimensional, and 4-dimensional embedding tables in
memory.
"""
super().__init__(
learning_rate,
use_gradient_accumulation,
clip_weight_min,
clip_weight_max,
weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate,
clipvalue,
slot_variable_creation_fn,
low_dimensional_packing_status,
)
if initial_accumulator_value <= 0:
raise ValueError(
f"Argument `initial_accumulator_value` must be a positive float. "
f"Received: {initial_accumulator_value}")
self.initial_accumulator_value = initial_accumulator_value
self.learning_rate_power = learning_rate_power
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.beta = beta
self.multiply_linear_by_learning_rate = multiply_linear_by_learning_rate
self.allow_zero_accumulator = allow_zero_accumulator
def _slot_names(self) -> List[Text]:
return ["accumulators", "linears"]
def _slot_initializers(self) -> List[init_ops_v2.Initializer]:
return [
init_ops_v2.Constant(
self.initial_accumulator_value, support_partition=True
),
init_ops_v2.Constant(support_partition=True),
]
def _set_optimization_parameters(
self, parameters: optimization_parameters_pb2.OptimizationParameters
):
super()._set_optimization_parameters(parameters)
ftrl = parameters.ftrl
ftrl.l1 = self.l1_regularization_strength
ftrl.l2 = self.l2_regularization_strength
ftrl.lr_power = self.learning_rate_power
ftrl.beta = self.beta
ftrl.multiply_linear_by_lr = self.multiply_linear_by_learning_rate
ftrl.allow_zero_accumulator = self.allow_zero_accumulator
def _load(self) -> Callable[..., ops.Operation]:
return tpu_ops.load_tpu_embedding_ftrl_parameters
def _retrieve(self) -> Callable[..., core.Tensor]:
return tpu_ops.retrieve_tpu_embedding_ftrl_parameters
@tf_export("tpu.experimental.embedding.Adam")
| FTRL |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 2351,
"end": 2603
} | class ____(BoringModel):
"""Tests that a model can take an object."""
@decorate
@decorate
def __init__(self, hparams, *my_args, **my_kwargs):
super().__init__()
self.save_hyperparameters(hparams)
| SaveHparamsDecoratedModel |
python | pytorch__pytorch | torch/ao/nn/quantized/dynamic/modules/linear.py | {
"start": 209,
"end": 6487
} | class ____(nnq.Linear):
r"""
A dynamic quantized linear module with floating point tensor as inputs and outputs.
We adopt the same interface as `torch.nn.Linear`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to :class:`torch.nn.Linear`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module which are of
shape :math:`(\text{out\_features}, \text{in\_features})`.
bias (Tensor): the non-learnable floating point bias of the module of shape
:math:`(\text{out\_features})`. If :attr:`bias` is ``True``,
the values are initialized to zero.
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.quantized.dynamic.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
# version used in this class is different from the parent class nnq.Linear
_version = 4
def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
super().__init__(in_features, out_features, bias_, dtype=dtype)
# We don't muck around with buffers or attributes or anything here
# to keep the module simple. *everything* is simply a Python attribute.
# Serialization logic is explicitly handled in the below serialization and
# deserialization modules
self.version = 4
def forward(self, x):
# Note that we can handle self.bias == None case.
if self._packed_params.dtype == torch.qint8:
if self.version is None or self.version < 4:
Y = torch.ops.quantized.linear_dynamic(
x, self._packed_params._packed_params
)
else:
Y = torch.ops.quantized.linear_dynamic(
x, self._packed_params._packed_params, reduce_range=True
)
elif self._packed_params.dtype == torch.float16:
Y = torch.ops.quantized.linear_dynamic_fp16(
x, self._packed_params._packed_params
)
else:
raise RuntimeError("Unsupported dtype on dynamic quantized linear!")
return Y.to(x.dtype)
def _get_name(self):
return "DynamicQuantizedLinear"
def extra_repr(self):
extra_repr_str = f"in_features={self.in_features}, out_features={self.out_features}, dtype={self._packed_params.dtype}"
if self._packed_params.dtype == torch.qint8:
extra_repr_str += f", qscheme={self.weight().qscheme()}"
return extra_repr_str
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
self.version = version
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a dynamic quantized module from a float module or qparams_dict
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
float_modules = [
torch.nn.Linear,
torch.nn.modules.linear.NonDynamicallyQuantizableLinear,
torch.ao.nn.intrinsic.modules.fused.LinearReLU,
torch.ao.nn.qat.dynamic.Linear,
]
assert type(mod) in float_modules, (
"nn.quantized.dynamic.Linear.from_float only works for one of"
+ str([float_mod.__name__ for float_mod in float_modules])
)
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
if type(mod) is nni.LinearReLU:
mod = mod[0]
# pyrefly: ignore [missing-attribute]
if mod.qconfig is not None and mod.qconfig.weight is not None:
# pyrefly: ignore [not-callable]
weight_observer = mod.qconfig.weight()
else:
# We have the circular import issues if we import the qconfig in the beginning of this file:
# https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
# import until we need it.
from torch.ao.quantization.qconfig import default_dynamic_qconfig
weight_observer = default_dynamic_qconfig.weight()
dtype = weight_observer.dtype
assert dtype in [torch.qint8, torch.float16], (
"The only supported dtypes for "
f"dynamic quantized linear are qint8 and float16 got: {dtype}"
)
weight_observer(mod.weight)
if dtype == torch.qint8:
qweight = _quantize_weight(mod.weight.float(), weight_observer)
elif dtype == torch.float16:
qweight = mod.weight.float()
else:
raise RuntimeError(
"Unsupported dtype specified for dynamic quantized Linear!"
)
qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)
# pyrefly: ignore [bad-argument-type]
qlinear.set_weight_bias(qweight, mod.bias)
return qlinear
@classmethod
def from_reference(cls, ref_qlinear): # type: ignore[override]
"""Create a (fbgemm/qnnpack) dynamic quantized module from a reference quantized
module
Args:
ref_qlinear (Module): a reference quantized module, either produced by
torch.ao.quantization functions or provided by the user
"""
qlinear = cls(
ref_qlinear.in_features,
ref_qlinear.out_features,
dtype=ref_qlinear.weight_dtype,
)
qweight = ref_qlinear.get_quantized_weight()
bias = ref_qlinear.bias
qlinear.set_weight_bias(qweight, bias)
return qlinear
| Linear |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1438604,
"end": 1449693
} | class ____(TopLevelSpec):
"""
TopLevelConcatSpec schema wrapper.
Parameters
----------
concat : Sequence[dict, :class:`FacetSpec`, :class:`LayerSpec`, :class:`RepeatSpec`, :class:`FacetedUnitSpec`, :class:`LayerRepeatSpec`, :class:`NonNormalizedSpec`, :class:`NonLayerRepeatSpec`, :class:`ConcatSpecGenericSpec`, :class:`HConcatSpecGenericSpec`, :class:`VConcatSpecGenericSpec`]
A list of views to be concatenated.
align : dict, :class:`LayoutAlign`, :class:`RowColLayoutAlign`, Literal['all', 'each', 'none']
The alignment to apply to grid rows and columns. The supported string values are
``"all"``, ``"each"``, and ``"none"``.
* For ``"none"``, a flow layout will be used, in which adjacent subviews are simply
placed one after the other.
* For ``"each"``, subviews will be aligned into a clean grid structure, but each row
or column may be of variable size.
* For ``"all"``, subviews will be aligned and each row or column will be sized
identically based on the maximum observed size. String values for this property
will be applied to both grid rows and columns.
Alternatively, an object value of the form ``{"row": string, "column": string}`` can
be used to supply different alignments for rows and columns.
**Default value:** ``"all"``.
autosize : dict, :class:`AutosizeType`, :class:`AutoSizeParams`, Literal['pad', 'none', 'fit', 'fit-x', 'fit-y']
How the visualization size should be determined. If a string, should be one of
``"pad"``, ``"fit"`` or ``"none"``. Object values can additionally specify
parameters for content sizing and automatic resizing.
**Default value**: ``pad``
background : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple']
CSS color property to use as the background of the entire view.
**Default value:** ``"white"``
bounds : Literal['full', 'flush']
The bounds calculation method to use for determining the extent of a sub-plot. One
of ``full`` (the default) or ``flush``.
* If set to ``full``, the entire calculated bounds (including axes, title, and
legend) will be used.
* If set to ``flush``, only the specified width and height values for the sub-view
will be used. The ``flush`` setting can be useful when attempting to place
sub-plots without axes or legends into a uniform grid structure.
**Default value:** ``"full"``
center : bool, dict, :class:`RowColboolean`
Boolean flag indicating if subviews should be centered relative to their respective
rows or columns.
An object value of the form ``{"row": boolean, "column": boolean}`` can be used to
supply different centering values for rows and columns.
**Default value:** ``false``
columns : float
The number of columns to include in the view composition layout.
**Default value**: ``undefined`` -- An infinite number of columns (a single row)
will be assumed. This is equivalent to ``hconcat`` (for ``concat``) and to using the
``column`` channel (for ``facet`` and ``repeat``).
**Note**:
1) This property is only for:
* the general (wrappable) ``concat`` operator (not ``hconcat``/``vconcat``)
* the ``facet`` and ``repeat`` operator with one field/repetition definition
(without row/column nesting)
2) Setting the ``columns`` to ``1`` is equivalent to ``vconcat`` (for ``concat``)
and to using the ``row`` channel (for ``facet`` and ``repeat``).
config : dict, :class:`Config`
Vega-Lite configuration object. This property can only be defined at the top-level
of a specification.
data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`, None
An object describing the data source. Set to ``null`` to ignore the parent's data
source. If no data is set, it is derived from the parent.
datasets : dict, :class:`Datasets`
A global data store for named datasets. This is a mapping from names to inline
datasets. This can be an array of objects or primitive values or a string. Arrays of
primitive values are ingested as objects with a ``data`` property.
description : str
Description of this mark for commenting purpose.
name : str
Name of the visualization for later reference.
padding : dict, float, :class:`ExprRef`, :class:`Padding`
The default visualization padding, in pixels, from the edge of the visualization
canvas to the data rectangle. If a number, specifies padding for all sides. If an
object, the value should have the format ``{"left": 5, "top": 5, "right": 5,
"bottom": 5}`` to specify padding for each side of the visualization.
**Default value**: ``5``
params : Sequence[dict, :class:`TopLevelParameter`, :class:`VariableParameter`, :class:`TopLevelSelectionParameter`]
Dynamic variables or selections that parameterize a visualization.
resolve : dict, :class:`Resolve`
Scale, axis, and legend resolutions for view composition specifications.
spacing : dict, float, :class:`RowColnumber`
The spacing in pixels between sub-views of the composition operator. An object of
the form ``{"row": number, "column": number}`` can be used to set different spacing
values for rows and columns.
**Default value**: Depends on ``"spacing"`` property of `the view composition
configuration <https://vega.github.io/vega-lite/docs/config.html#view-config>`__
(``20`` by default)
title : str, dict, :class:`Text`, Sequence[str], :class:`TitleParams`
Title for the plot.
transform : Sequence[dict, :class:`Transform`, :class:`BinTransform`, :class:`FoldTransform`, :class:`LoessTransform`, :class:`PivotTransform`, :class:`StackTransform`, :class:`ExtentTransform`, :class:`FilterTransform`, :class:`ImputeTransform`, :class:`LookupTransform`, :class:`SampleTransform`, :class:`WindowTransform`, :class:`DensityTransform`, :class:`FlattenTransform`, :class:`QuantileTransform`, :class:`TimeUnitTransform`, :class:`AggregateTransform`, :class:`CalculateTransform`, :class:`RegressionTransform`, :class:`JoinAggregateTransform`]
An array of data transformations such as filter and new field calculation.
usermeta : dict, :class:`Dict`
Optional metadata that will be passed to Vega. This object is completely ignored by
Vega and Vega-Lite and can be used for custom metadata.
$schema : str
URL to `JSON schema <http://json-schema.org/>`__ for a Vega-Lite specification.
Unless you have a reason to change this, use
``https://vega.github.io/schema/vega-lite/v6.json``. Setting the ``$schema``
property allows automatic validation and autocomplete in editors that support JSON
schema.
"""
_schema = {"$ref": "#/definitions/TopLevelConcatSpec"}
def __init__(
self,
concat: Optional[Sequence[SchemaBase | Map]] = Undefined,
align: Optional[SchemaBase | Map | LayoutAlign_T] = Undefined,
autosize: Optional[SchemaBase | Map | AutosizeType_T] = Undefined,
background: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
bounds: Optional[Literal["full", "flush"]] = Undefined,
center: Optional[bool | SchemaBase | Map] = Undefined,
columns: Optional[float] = Undefined,
config: Optional[SchemaBase | Map] = Undefined,
data: Optional[SchemaBase | ChartDataType | Map | None] = Undefined,
datasets: Optional[SchemaBase | Map] = Undefined,
description: Optional[str] = Undefined,
name: Optional[str] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
params: Optional[Sequence[SchemaBase | Map]] = Undefined,
resolve: Optional[SchemaBase | Map] = Undefined,
spacing: Optional[float | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
transform: Optional[Sequence[SchemaBase | Map]] = Undefined,
usermeta: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
concat=concat,
align=align,
autosize=autosize,
background=background,
bounds=bounds,
center=center,
columns=columns,
config=config,
data=data,
datasets=datasets,
description=description,
name=name,
padding=padding,
params=params,
resolve=resolve,
spacing=spacing,
title=title,
transform=transform,
usermeta=usermeta,
**kwds,
)
| TopLevelConcatSpec |
python | Textualize__textual | tests/command_palette/test_run_on_select.py | {
"start": 159,
"end": 651
} | class ____(Provider):
async def search(self, _: str) -> Hits:
def goes_nowhere_does_nothing(selection: int) -> None:
assert isinstance(self.app, CommandPaletteRunOnSelectApp)
self.app.selection = selection
for n in range(100):
yield Hit(
n + 1 / 100,
str(n),
partial(goes_nowhere_does_nothing, n),
str(n),
f"This is help for {n}",
)
| SimpleSource |
python | keras-team__keras | keras/src/callbacks/lambda_callback.py | {
"start": 146,
"end": 3459
} | class ____(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time (during `Model.{fit | evaluate | predict}`).
Note that the callbacks expects positional arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
- `on_train_batch_begin` and `on_train_batch_end` expect a positional
argument `batch` and a keyword argument `logs`
- See `Callback` class definition for the full list of functions and their
expected arguments.
Args:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
on_train_batch_begin: called at the beginning of every train batch.
on_train_batch_end: called at the end of every train batch.
kwargs: Any function in `Callback` that you want to override by
passing `function_name=function`. For example,
`LambdaCallback(.., on_train_end=train_end_fn)`. The custom function
needs to have same arguments as the ones defined in `Callback`.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_train_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(
self,
on_epoch_begin=None,
on_epoch_end=None,
on_train_begin=None,
on_train_end=None,
on_train_batch_begin=None,
on_train_batch_end=None,
**kwargs,
):
super().__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
if on_train_begin is not None:
self.on_train_begin = on_train_begin
if on_train_end is not None:
self.on_train_end = on_train_end
if on_train_batch_begin is not None:
self.on_train_batch_begin = on_train_batch_begin
if on_train_batch_end is not None:
self.on_train_batch_end = on_train_batch_end
| LambdaCallback |
python | pytorch__pytorch | test/dynamo/test_python_autograd.py | {
"start": 6338,
"end": 8882
} | class ____(TestCase):
def _common(self, fn, expected_ops):
args1 = [torch.randn(10), torch.randn(10)]
args2 = [torch.randn(10), torch.randn(10)]
cnt = CompileCounter()
fn_dynamo = torch._dynamo.optimize_assert(cnt)(fn)
reset_tape()
res1 = fn_dynamo(*args1)
reset_tape()
res2 = fn_dynamo(*args2)
reset_tape()
self.assertTrue(same(res1, fn(*args1)))
reset_tape()
self.assertTrue(same(res2, fn(*args2)))
reset_tape()
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, expected_ops)
def test_forwards1(self):
def fn(a, b):
a = Variable.constant(a, name="a")
b = Variable.constant(b, name="b")
loss = simple(a, b).sum()
return loss
self._common(fn, 3)
def test_forwards2(self):
def fn(a, b):
reset_tape()
a = Variable.constant(a, name="a")
b = Variable.constant(b, name="b")
loss = simple(a, b).sum()
reset_tape()
return loss
self._common(fn, 3)
def test_backwards1(self):
def fn(a, b):
a = Variable.constant(a, name="a")
b = Variable.constant(b, name="b")
loss = simple(a, b).sum()
return grad(loss, [a, b])
self._common(fn, 8)
def test_backwards2(self):
def fn(a, b):
reset_tape()
a = Variable.constant(a, name="a")
b = Variable.constant(b, name="b")
loss = simple(a, b).sum()
res = grad(loss, [a, b])
reset_tape()
return res
self._common(fn, 8)
def test_split(self):
v1 = Variable.constant(torch.randn(10), name="a")
v2 = Variable.constant(torch.randn(10), name="b")
cnt = CompileCounter()
def forward(a, b):
return simple(a, b).sum()
reset_tape()
loss1 = forward(v1, v2)
grad1 = grad(loss1, [v1, v2])
reset_tape()
opt_forward = torch._dynamo.optimize_assert(cnt)(forward)
opt_grad = torch._dynamo.optimize_assert(cnt)(grad)
loss2 = opt_forward(v1, v2)
# force two frames
grad2 = opt_grad(loss2, [v1, v2])
self.assertTrue(same(loss1, loss2))
self.assertTrue(same(grad1, grad2))
self.assertEqual(cnt.frame_count, 2)
self.assertEqual(cnt.op_count, 8)
if __name__ == "__main__":
run_tests()
| TestPythonAutograd |
python | tensorflow__tensorflow | tensorflow/core/tfrt/saved_model/tests/gen_saved_model_v2.py | {
"start": 1386,
"end": 2152
} | class ____(module.Module):
"""Defines a toy module."""
def __init__(self):
super(ToyModule, self).__init__()
self.w = variables.Variable(constant_op.constant([[1], [2], [3]]), name='w')
@def_function.function(input_signature=[
tensor_spec.TensorSpec([1, 3], dtypes.int32, name='input')
])
def toy(self, x):
r = math_ops.matmul(x, self.w, name='result')
return r
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
v2_compat.enable_v2_behavior()
save.save(
ToyModule(),
FLAGS.saved_model_path,
options=save_options.SaveOptions(save_debug_info=False))
logging.info('Saved model to: %s', FLAGS.saved_model_path)
if __name__ == '__main__':
app.run(main)
| ToyModule |
python | kamyu104__LeetCode-Solutions | Python/earliest-possible-day-of-full-bloom.py | {
"start": 33,
"end": 477
} | class ____(object):
def earliestFullBloom(self, plantTime, growTime):
"""
:type plantTime: List[int]
:type growTime: List[int]
:rtype: int
"""
order = range(len(growTime))
order.sort(key=lambda x: growTime[x], reverse=True)
result = curr = 0
for i in order:
curr += plantTime[i]
result = max(result, curr+growTime[i])
return result
| Solution |
python | apache__airflow | providers/ydb/tests/unit/ydb/operators/test_ydb.py | {
"start": 2964,
"end": 5152
} | class ____:
def setup_method(self):
dag_id = "test_dag"
self.dag = DAG(
dag_id,
default_args={
"owner": "airflow",
"start_date": datetime.today(),
"end_date": datetime.today() + timedelta(days=1),
},
schedule="@once",
)
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("ydb.Driver")
@patch("ydb.QuerySessionPool")
@patch("ydb_dbapi.Connection._cursor_cls", new_callable=PropertyMock)
def test_execute_query(self, cursor_class, mock_session_pool, mock_driver, mock_get_connection):
mock_get_connection.return_value = Connection(
conn_type="ydb", host="localhost", extra={"database": "/my_db"}
)
cursor_class.return_value = FakeYDBCursor
driver = FakeDriver()
mock_driver.return_value = driver
session_pool = FakeSessionPool(driver)
mock_session_pool.return_value = session_pool
context = {"ti": MagicMock()}
operator = YDBExecuteQueryOperator(
task_id="simple_sql", sql="select 987", is_ddl=False, handler=fetch_one_handler
)
results = operator.execute(context)
assert results == "fetchone: result"
operator = YDBExecuteQueryOperator(
task_id="simple_sql", sql="select 987", is_ddl=False, handler=fetch_all_handler
)
results = operator.execute(context)
assert results == "fetchall: result"
hook = operator.get_db_hook()
column_types = (
ydb.BulkUpsertColumns()
.add_column("a", ydb.OptionalType(ydb.PrimitiveType.Uint64))
.add_column("b", ydb.OptionalType(ydb.PrimitiveType.Utf8))
)
rows = [
{"a": 1, "b": "hello"},
{"a": 888, "b": "world"},
]
hook.bulk_upsert("my_table", rows=rows, column_types=column_types)
assert len(session_pool._driver.table_client.bulk_upsert_args) == 1
arg0 = session_pool._driver.table_client.bulk_upsert_args[0]
assert arg0[0] == "/my_db/my_table"
assert len(arg0[1]) == 2
| TestYDBExecuteQueryOperator |
python | mlflow__mlflow | mlflow/models/utils.py | {
"start": 6373,
"end": 76692
} | class ____:
"""
Represents an input example for MLflow model.
Contains jsonable data that can be saved with the model and meta data about the exported format
that can be saved with :py:class:`Model <mlflow.models.Model>`.
The _Example is created from example data provided by user. The example(s) can be provided as
pandas.DataFrame, numpy.ndarray, python dictionary or python list. The assumption is that the
example contains jsonable elements (see storage format section below). The input example will
be saved as a json serializable object if it is a pandas DataFrame or numpy array.
If the example is a tuple, the first element is considered as the example data and the second
element is considered as the example params.
NOTE: serving input example is not supported for sparse matrices yet.
Metadata:
The _Example metadata contains the following information:
- artifact_path: Relative path to the serialized example within the model directory.
- serving_input_path: Relative path to the serialized example used for model serving
within the model directory.
- type: Type of example data provided by the user. Supported types are:
- ndarray
- dataframe
- json_object
- sparse_matrix_csc
- sparse_matrix_csr
If the `type` is `dataframe`, `pandas_orient` is also stored in the metadata. This
attribute specifies how is the dataframe encoded in json. For example, "split" value
signals that the data is stored as object with columns and data attributes.
Storage Format:
The examples are stored as json for portability and readability. Therefore, the contents of the
example(s) must be jsonable. MLflow will make the following conversions automatically on behalf
of the user:
- binary values: :py:class:`bytes` or :py:class:`bytearray` are converted to base64
encoded strings.
- numpy types: Numpy types are converted to the corresponding python types or their closest
equivalent.
- csc/csr matrix: similar to 2 dims numpy array, csc/csr matrix are converted to
corresponding python types or their closest equivalent.
"""
def __init__(self, input_example: ModelInputExample):
try:
import pyspark.sql
if isinstance(input_example, pyspark.sql.DataFrame):
raise MlflowException(
"Examples can not be provided as Spark Dataframe. "
"Please make sure your example is of a small size and "
"turn it into a pandas DataFrame."
)
except ImportError:
pass
self.info = {
INPUT_EXAMPLE_PATH: EXAMPLE_FILENAME,
}
self._inference_data, self._inference_params = _split_input_data_and_params(
deepcopy(input_example)
)
if self._inference_params:
self.info[EXAMPLE_PARAMS_KEY] = "true"
model_input = deepcopy(self._inference_data)
if isinstance(model_input, pydantic.BaseModel):
model_input = model_input.model_dump()
is_unified_llm_input = False
if isinstance(model_input, dict):
"""
Supported types are:
- Dict[str, Union[DataType, List, Dict]] --> type: json_object
- Dict[str, numpy.ndarray] --> type: ndarray
"""
if any(isinstance(values, np.ndarray) for values in model_input.values()):
if not all(isinstance(values, np.ndarray) for values in model_input.values()):
raise MlflowException.invalid_parameter_value(
"Mixed types in dictionary are not supported as input examples. "
"Found numpy arrays and other types."
)
self.info["type"] = "ndarray"
model_input = _handle_ndarray_input(model_input)
self.serving_input = {INPUTS: model_input}
else:
from mlflow.pyfunc.utils.serving_data_parser import is_unified_llm_input
self.info["type"] = "json_object"
is_unified_llm_input = is_unified_llm_input(model_input)
if is_unified_llm_input:
self.serving_input = model_input
else:
self.serving_input = {INPUTS: model_input}
elif isinstance(model_input, np.ndarray):
"""type: ndarray"""
model_input = _handle_ndarray_input(model_input)
self.info["type"] = "ndarray"
self.serving_input = {INPUTS: model_input}
elif isinstance(model_input, list):
"""
Supported types are:
- List[DataType]
- List[Dict[str, Union[DataType, List, Dict]]]
--> type: json_object
"""
if _contains_nd_array(model_input):
raise TensorsNotSupportedException(
"Numpy arrays in list are not supported as input examples."
)
self.info["type"] = "json_object"
self.serving_input = {INPUTS: model_input}
elif _is_sparse_matrix(model_input):
"""
Supported types are:
- scipy.sparse.csr_matrix
- scipy.sparse.csc_matrix
Note: This type of input is not supported by the scoring server yet
"""
if isinstance(model_input, csc_matrix):
example_type = "sparse_matrix_csc"
else:
example_type = "sparse_matrix_csr"
self.info["type"] = example_type
self.serving_input = {INPUTS: model_input.toarray()}
model_input = _handle_sparse_matrix(model_input)
elif isinstance(model_input, pd.DataFrame):
model_input = _convert_dataframe_to_split_dict(model_input)
self.serving_input = {DF_SPLIT: model_input}
orient = "split" if "columns" in model_input else "values"
self.info.update(
{
"type": "dataframe",
"pandas_orient": orient,
}
)
elif np.isscalar(model_input) or isinstance(model_input, dt.datetime):
self.info["type"] = "json_object"
self.serving_input = {INPUTS: model_input}
else:
raise MlflowException.invalid_parameter_value(
"Expected one of the following types:\n"
"- pandas.DataFrame\n"
"- numpy.ndarray\n"
"- dictionary of (name -> numpy.ndarray)\n"
"- scipy.sparse.csr_matrix\n"
"- scipy.sparse.csc_matrix\n"
"- dict\n"
"- list\n"
"- scalars\n"
"- datetime.datetime\n"
"- pydantic model instance\n"
f"but got '{type(model_input)}'",
)
if self._inference_params is not None:
"""
Save input data and params with their respective keys, so we can load them separately.
"""
model_input = {
EXAMPLE_DATA_KEY: model_input,
EXAMPLE_PARAMS_KEY: self._inference_params,
}
if self.serving_input:
if is_unified_llm_input:
self.serving_input = {
**(self.serving_input or {}),
**self._inference_params,
}
else:
self.serving_input = {
**(self.serving_input or {}),
SERVING_PARAMS_KEY: self._inference_params,
}
self.json_input_example = json.dumps(model_input, cls=NumpyEncoder)
if self.serving_input:
self.json_serving_input = json.dumps(self.serving_input, cls=NumpyEncoder, indent=2)
self.info[SERVING_INPUT_PATH] = SERVING_INPUT_FILENAME
else:
self.json_serving_input = None
def save(self, parent_dir_path: str):
"""
Save the example as json at ``parent_dir_path``/`self.info['artifact_path']`.
Save serving input as json at ``parent_dir_path``/`self.info['serving_input_path']`.
"""
with open(os.path.join(parent_dir_path, self.info[INPUT_EXAMPLE_PATH]), "w") as f:
f.write(self.json_input_example)
if self.json_serving_input:
with open(os.path.join(parent_dir_path, self.info[SERVING_INPUT_PATH]), "w") as f:
f.write(self.json_serving_input)
@property
def inference_data(self):
"""
Returns the input example in a form that PyFunc wrapped models can score.
"""
return self._inference_data
@property
def inference_params(self):
"""
Returns the params dictionary that PyFunc wrapped models can use for scoring.
"""
return self._inference_params
def _contains_params(input_example):
# For tuple input, we assume the first item is input_example data
# and the second item is params dictionary.
return (
isinstance(input_example, tuple)
and len(input_example) == 2
and isinstance(input_example[1], dict)
)
def _split_input_data_and_params(input_example):
if _contains_params(input_example):
input_data, inference_params = input_example
_validate_params(inference_params)
return input_data, inference_params
return input_example, None
def convert_input_example_to_serving_input(input_example) -> str | None:
"""
Helper function to convert a model's input example to a serving input example that
can be used for model inference in the scoring server.
Args:
input_example: model input example. Supported types are pandas.DataFrame, numpy.ndarray,
dictionary of (name -> numpy.ndarray), list, scalars and dicts with json serializable
values.
Returns:
serving input example as a json string
"""
if input_example is None:
return None
example = _Example(input_example)
return example.json_serving_input
def _save_example(
mlflow_model: Model, input_example: ModelInputExample | None, path: str
) -> _Example | None:
"""
Saves example to a file on the given path and updates passed Model with example metadata.
The metadata is a dictionary with the following fields:
- 'artifact_path': example path relative to the model directory.
- 'type': Type of example. Currently the supported values are 'dataframe' and 'ndarray'
- One of the following metadata based on the `type`:
- 'pandas_orient': Used to store dataframes. Determines the json encoding for dataframe
examples in terms of pandas orient convention. Defaults to 'split'.
- 'format: Used to store tensors. Determines the standard used to store a tensor input
example. MLflow uses a JSON-formatted string representation of TF serving
input.
Args:
mlflow_model: Model metadata that will get updated with the example metadata.
path: Where to store the example file. Should be model the model directory.
Returns:
_Example object that contains saved input example.
"""
if input_example is None:
return None
example = _Example(input_example)
example.save(path)
mlflow_model.saved_input_example_info = example.info
return example
def _get_mlflow_model_input_example_dict(
mlflow_model: Model, uri_or_path: str
) -> dict[str, Any] | None:
"""
Args:
mlflow_model: Model metadata.
uri_or_path: Model or run URI, or path to the `model` directory.
e.g. models://<model_name>/<model_version>, runs:/<run_id>/<artifact_path>
or /path/to/model
Returns:
Input example or None if the model has no example.
"""
if mlflow_model.saved_input_example_info is None:
return None
example_type = mlflow_model.saved_input_example_info["type"]
if example_type not in [
"dataframe",
"ndarray",
"sparse_matrix_csc",
"sparse_matrix_csr",
"json_object",
]:
raise MlflowException(f"This version of mlflow can not load example of type {example_type}")
return json.loads(
_read_file_content(uri_or_path, mlflow_model.saved_input_example_info[INPUT_EXAMPLE_PATH])
)
def _load_serving_input_example(mlflow_model: Model, path: str) -> str | None:
"""
Load serving input example from a model directory. Returns None if there is no serving input
example.
Args:
mlflow_model: Model metadata.
path: Path to the model directory.
Returns:
Serving input example or None if the model has no serving input example.
"""
if mlflow_model.saved_input_example_info is None:
return None
serving_input_path = mlflow_model.saved_input_example_info.get(SERVING_INPUT_PATH)
if serving_input_path is None:
return None
with open(os.path.join(path, serving_input_path)) as handle:
return handle.read()
def load_serving_example(model_uri_or_path: str):
"""
Load serving input example from a model directory or URI.
Args:
model_uri_or_path: Model URI or path to the `model` directory.
e.g. models://<model_name>/<model_version> or /path/to/model
"""
return _read_file_content(model_uri_or_path, SERVING_INPUT_FILENAME)
def _read_file_content(uri_or_path: str, file_name: str):
"""
Read file content from a model directory or URI.
Args:
uri_or_path: Model or run URI, or path to the `model` directory.
e.g. models://<model_name>/<model_version>, runs:/<run_id>/<artifact_path>
or /path/to/model
file_name: Name of the file to read.
"""
from mlflow.store.artifact.models_artifact_repo import ModelsArtifactRepository
if ModelsArtifactRepository._is_logged_model_uri(uri_or_path):
uri_or_path = ModelsArtifactRepository.get_underlying_uri(uri_or_path)
file_path = str(uri_or_path).rstrip("/") + "/" + file_name
if os.path.exists(file_path):
with open(file_path) as handle:
return handle.read()
else:
with tempfile.TemporaryDirectory() as tmpdir:
local_file_path = _download_artifact_from_uri(file_path, output_path=tmpdir)
with open(local_file_path) as handle:
return handle.read()
def _read_example(mlflow_model: Model, uri_or_path: str):
"""
Read example from a model directory. Returns None if there is no example metadata (i.e. the
model was saved without example). Raises FileNotFoundError if there is model metadata but the
example file is missing.
Args:
mlflow_model: Model metadata.
uri_or_path: Model or run URI, or path to the `model` directory.
e.g. models://<model_name>/<model_version>, runs:/<run_id>/<artifact_path>
or /path/to/model
Returns:
Input example data or None if the model has no example.
"""
input_example = _get_mlflow_model_input_example_dict(mlflow_model, uri_or_path)
if input_example is None:
return None
example_type = mlflow_model.saved_input_example_info["type"]
input_schema = mlflow_model.signature.inputs if mlflow_model.signature is not None else None
if mlflow_model.saved_input_example_info.get(EXAMPLE_PARAMS_KEY, None):
input_example = input_example[EXAMPLE_DATA_KEY]
if example_type == "json_object":
return input_example
if example_type == "ndarray":
return parse_inputs_data(input_example, schema=input_schema)
if example_type in ["sparse_matrix_csc", "sparse_matrix_csr"]:
return _read_sparse_matrix_from_json(input_example, example_type)
if example_type == "dataframe":
return dataframe_from_parsed_json(input_example, pandas_orient="split", schema=input_schema)
raise MlflowException(
"Malformed input example metadata. The 'type' field must be one of "
"'dataframe', 'ndarray', 'sparse_matrix_csc', 'sparse_matrix_csr' or 'json_object'."
)
def _read_example_params(mlflow_model: Model, path: str):
"""
Read params of input_example from a model directory. Returns None if there is no params
in the input_example or the model was saved without example.
"""
if (
mlflow_model.saved_input_example_info is None
or mlflow_model.saved_input_example_info.get(EXAMPLE_PARAMS_KEY, None) is None
):
return None
input_example_dict = _get_mlflow_model_input_example_dict(mlflow_model, path)
return input_example_dict[EXAMPLE_PARAMS_KEY]
def _read_tensor_input_from_json(path_or_data, schema=None):
if isinstance(path_or_data, str) and os.path.exists(path_or_data):
with open(path_or_data) as handle:
inp_dict = json.load(handle)
else:
inp_dict = path_or_data
return parse_tf_serving_input(inp_dict, schema)
def _read_sparse_matrix_from_json(path_or_data, example_type):
if isinstance(path_or_data, str) and os.path.exists(path_or_data):
with open(path_or_data) as handle:
matrix_data = json.load(handle)
else:
matrix_data = path_or_data
data = matrix_data["data"]
indices = matrix_data["indices"]
indptr = matrix_data["indptr"]
shape = tuple(matrix_data["shape"])
if example_type == "sparse_matrix_csc":
return csc_matrix((data, indices, indptr), shape=shape)
else:
return csr_matrix((data, indices, indptr), shape=shape)
def plot_lines(data_series, xlabel, ylabel, legend_loc=None, line_kwargs=None, title=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
if line_kwargs is None:
line_kwargs = {}
for label, data_x, data_y in data_series:
ax.plot(data_x, data_y, label=label, **line_kwargs)
if legend_loc:
ax.legend(loc=legend_loc)
ax.set(xlabel=xlabel, ylabel=ylabel, title=title)
return fig, ax
def _enforce_tensor_spec(
values: Union[np.ndarray, "csc_matrix", "csr_matrix"],
tensor_spec: TensorSpec,
):
"""
Enforce the input tensor shape and type matches the provided tensor spec.
"""
expected_shape = tensor_spec.shape
expected_type = tensor_spec.type
actual_shape = values.shape
actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
# This logic is for handling "ragged" arrays. The first check is for a standard numpy shape
# representation of a ragged array. The second is for handling a more manual specification
# of shape while support an input which is a ragged array.
if len(expected_shape) == 1 and expected_shape[0] == -1 and expected_type == np.dtype("O"):
# Sample spec: Tensor('object', (-1,))
# Will pass on any provided input
return values
if (
len(expected_shape) > 1
and -1 in expected_shape[1:]
and len(actual_shape) == 1
and actual_type == np.dtype("O")
):
# Sample spec: Tensor('float64', (-1, -1, -1, 3))
# Will pass on inputs which are ragged arrays: shape==(x,), dtype=='object'
return values
if len(expected_shape) != len(actual_shape):
raise MlflowException(
f"Shape of input {actual_shape} does not match expected shape {expected_shape}."
)
for expected, actual in zip(expected_shape, actual_shape):
if expected == -1:
continue
if expected != actual:
raise MlflowException(
f"Shape of input {actual_shape} does not match expected shape {expected_shape}."
)
if clean_tensor_type(actual_type) != expected_type:
raise MlflowException(
f"dtype of input {actual_type} does not match expected dtype {expected_type}"
)
return values
def _enforce_mlflow_datatype(name, values: pd.Series, t: DataType):
"""
Enforce the input column type matches the declared in model input schema.
The following type conversions are allowed:
1. object -> string
2. int -> long (upcast)
3. float -> double (upcast)
4. int -> double (safe conversion)
5. np.datetime64[x] -> datetime (any precision)
6. object -> datetime
NB: pandas does not have native decimal data type, when user train and infer
model from pyspark dataframe that contains decimal type, the schema will be
treated as float64.
7. decimal -> double
Any other type mismatch will raise error.
"""
if values.dtype == object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if t == DataType.string and values.dtype == object:
# NB: the object can contain any type and we currently cannot cast to pandas Strings
# due to how None is cast
return values
# NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
# side of the comparison operator. It works, however, if pandas type is on the left hand side.
# That is because pandas is aware of numpy.
if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
# The types are already compatible => conversion is not necessary.
return values
if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
# NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
# denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
# ignore precision when matching datetime columns.
try:
return values.astype(np.dtype("datetime64[ns]"))
except TypeError as e:
raise MlflowException(
"Please ensure that the input data of datetime column only contains timezone-naive "
f"datetime objects. Error: {e}"
)
if t == DataType.datetime and (values.dtype == object or values.dtype == t.to_python()):
# NB: Pyspark date columns get converted to object when converted to a pandas
# DataFrame. To respect the original typing, we convert the column to datetime.
try:
return values.astype(np.dtype("datetime64[ns]"), errors="raise")
except ValueError as e:
raise MlflowException(
f"Failed to convert column {name} from type {values.dtype} to {t}."
) from e
if t == DataType.boolean and values.dtype == object:
# Should not convert type otherwise it converts None to boolean False
return values
if t == DataType.double and values.dtype == decimal.Decimal:
# NB: Pyspark Decimal column get converted to decimal.Decimal when converted to pandas
# DataFrame. In order to support decimal data training from spark data frame, we add this
# conversion even we might lose the precision.
try:
return pd.to_numeric(values, errors="raise")
except ValueError:
raise MlflowException(
f"Failed to convert column {name} from type {values.dtype} to {t}."
)
numpy_type = t.to_numpy()
if values.dtype.kind == numpy_type.kind:
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
elif values.dtype.kind == "u" and numpy_type.kind == "i":
is_upcast = values.dtype.itemsize < numpy_type.itemsize
elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
# allow (u)int => double conversion
is_upcast = values.dtype.itemsize <= 6
else:
is_upcast = False
if is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# support converting long -> float/double for 0 and 1 values
def all_zero_or_ones(xs):
return all(pd.isnull(x) or x in [0, 1] for x in xs)
if (
values.dtype == np.int64
and numpy_type in (np.float32, np.float64)
and all_zero_or_ones(values)
):
return values.astype(numpy_type, errors="raise")
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
def all_ints(xs):
return all(pd.isnull(x) or int(x) == x for x in xs)
hint = ""
if (
values.dtype == np.float64
and numpy_type.kind in ("i", "u")
and values.hasnans
and all_ints(values)
):
hint = (
" Hint: the type mismatch is likely caused by missing values. "
"Integer columns in python can not represent missing values and are therefore "
"encoded as floats. The best way to avoid this problem is to infer the model "
"schema based on a realistic data sample (training dataset) that includes missing "
"values. Alternatively, you can declare integer columns as doubles (float64) "
"whenever these columns may have missing values. See `Handling Integers With "
"Missing Values <https://www.mlflow.org/docs/latest/models.html#"
"handling-integers-with-missing-values>`_ for more details."
)
raise MlflowException(
f"Incompatible input types for column {name}. "
f"Can not safely convert {values.dtype} to {numpy_type}.{hint}"
)
# dtype -> possible value types mapping
_ALLOWED_CONVERSIONS_FOR_PARAMS = {
DataType.long: (DataType.integer,),
DataType.float: (DataType.integer, DataType.long),
DataType.double: (DataType.integer, DataType.long, DataType.float),
}
def _enforce_param_datatype(value: Any, dtype: DataType):
    """
    Enforce the value matches the data type. This is used to enforce params datatype.
    The returned data is of python built-in type or a datetime object.

    The following type conversions are allowed:

    1. int -> long, float, double
    2. long -> float, double
    3. float -> double
    4. any -> datetime (try conversion)

    Any other type mismatch will raise error.

    Args:
        value: parameter value
        dtype: expected data type

    Returns:
        The converted value as a Python built-in type (or a datetime.date/datetime
        object for DataType.datetime), or None when ``value`` is None.

    Raises:
        MlflowException: if the value cannot be safely converted to ``dtype``.
    """
    if value is None:
        return
    if dtype == DataType.datetime:
        try:
            # np.datetime64(...).item() yields datetime.date/datetime for day- and
            # microsecond-resolution inputs, but a raw int for finer units -- reject ints.
            datetime_value = np.datetime64(value).item()
            if isinstance(datetime_value, int):
                raise MlflowException.invalid_parameter_value(
                    f"Failed to convert value to `{dtype}`. "
                    f"It must be convertible to datetime.date/datetime, got `{value}`"
                )
            return datetime_value
        except ValueError as e:
            raise MlflowException.invalid_parameter_value(
                f"Failed to convert value `{value}` from type `{type(value)}` to `{dtype}`"
            ) from e
    # Note that np.isscalar(datetime.date(...)) is False
    if not np.isscalar(value):
        raise MlflowException.invalid_parameter_value(
            f"Value must be a scalar for type `{dtype}`, got `{value}`"
        )
    # Always convert to python native type for params
    if DataType.check_type(dtype, value):
        return dtype.to_python()(value)
    # Fall back to the whitelisted widening conversions (e.g. int -> double).
    if dtype in _ALLOWED_CONVERSIONS_FOR_PARAMS and any(
        DataType.check_type(t, value) for t in _ALLOWED_CONVERSIONS_FOR_PARAMS[dtype]
    ):
        try:
            return dtype.to_python()(value)
        except ValueError as e:
            raise MlflowException.invalid_parameter_value(
                f"Failed to convert value `{value}` from type `{type(value)}` to `{dtype}`"
            ) from e
    raise MlflowException.invalid_parameter_value(
        f"Can not safely convert `{type(value)}` to `{dtype}` for value `{value}`"
    )
def _enforce_unnamed_col_schema(pf_input: pd.DataFrame, input_schema: Schema):
    """
    Enforce the input columns conform to the model's column-based signature.

    Columns are matched positionally: the first N dataframe columns are paired
    with the N declared (unnamed) schema inputs; any extra columns are dropped.
    """
    expected_types = input_schema.input_types()
    selected_columns = pf_input.columns[: len(input_schema.inputs)]
    enforced = {}
    for column, expected in zip(selected_columns, expected_types):
        values = pf_input[column]
        if isinstance(expected, DataType):
            enforced[column] = _enforce_mlflow_datatype(column, values, expected)
        else:
            # Object/Array/Map specs are validated element-wise; we assume pf_input
            # is a pandas DataFrame here, otherwise the schema is not valid.
            enforced[column] = pd.Series(
                [_enforce_type(item, expected) for item in values], name=column
            )
    return pd.DataFrame(enforced)
def _enforce_named_col_schema(pf_input: pd.DataFrame, input_schema: Schema):
    """
    Enforce the input columns conform to the model's column-based signature.

    Iterates the schema's declared input names: required columns missing from
    ``pf_input`` raise, missing optional columns are skipped, and extra columns
    are dropped from the returned DataFrame.
    """
    input_names = input_schema.input_names()
    input_dict = input_schema.input_dict()
    new_pf_input = {}
    for name in input_names:
        input_type = input_dict[name].type
        required = input_dict[name].required
        if name not in pf_input:
            if required:
                raise MlflowException(
                    f"The input column '{name}' is required by the model "
                    "signature but missing from the input data."
                )
            else:
                continue
        if isinstance(input_type, DataType):
            new_pf_input[name] = _enforce_mlflow_datatype(name, pf_input[name], input_type)
        # If the input_type is objects/arrays/maps, we assume pf_input must be a pandas DataFrame.
        # Otherwise, the schema is not valid.
        else:
            new_pf_input[name] = pd.Series(
                [_enforce_type(obj, input_type, required) for obj in pf_input[name]], name=name
            )
    return pd.DataFrame(new_pf_input)
def _reshape_and_cast_pandas_column_values(name, pd_series, tensor_spec):
    """
    Convert one pandas column into a numpy array conforming to ``tensor_spec``.

    Handles three column layouts: scalar values (spec shape must be (-1,) or
    (-1, 1)), lists of scalars (reshaped and cast to the spec type), and numpy
    arrays (reshaped only, preserving dtype for strict downstream validation).

    Raises:
        MlflowException: if the spec shape is unsupported for DataFrame input or
            the values cannot be reshaped/cast to the spec.
    """
    # Only the first (batch) dimension may be variable for pandas input.
    if tensor_spec.shape[0] != -1 or -1 in tensor_spec.shape[1:]:
        raise MlflowException(
            "For pandas dataframe input, the first dimension of shape must be a variable "
            "dimension and other dimensions must be fixed, but in model signature the shape "
            f"of {'input ' + name if name else 'the unnamed input'} is {tensor_spec.shape}."
        )
    if np.isscalar(pd_series[0]):
        for shape in [(-1,), (-1, 1)]:
            if tensor_spec.shape == shape:
                return _enforce_tensor_spec(
                    np.array(pd_series, dtype=tensor_spec.type).reshape(shape), tensor_spec
                )
        raise MlflowException(
            f"The input pandas dataframe column '{name}' contains scalar "
            "values, which requires the shape to be (-1,) or (-1, 1), but got tensor spec "
            f"shape of {tensor_spec.shape}.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    elif isinstance(pd_series[0], list) and np.isscalar(pd_series[0][0]):
        # If the pandas column contains list type values,
        # in this case, the shape and type information is lost,
        # so do not enforce the shape and type, instead,
        # reshape the array value list to the required shape, and cast value type to
        # required type.
        reshape_err_msg = (
            f"The value in the Input DataFrame column '{name}' could not be converted to the "
            f"expected shape of: '{tensor_spec.shape}'. Ensure that each of the input list "
            "elements are of uniform length and that the data can be coerced to the tensor "
            f"type '{tensor_spec.type}'"
        )
        try:
            flattened_numpy_arr = np.vstack(pd_series.tolist())
            reshaped_numpy_arr = flattened_numpy_arr.reshape(tensor_spec.shape).astype(
                tensor_spec.type
            )
        except ValueError:
            raise MlflowException(reshape_err_msg, error_code=INVALID_PARAMETER_VALUE)
        # Reshaping must not change the number of rows (batch size).
        if len(reshaped_numpy_arr) != len(pd_series):
            raise MlflowException(reshape_err_msg, error_code=INVALID_PARAMETER_VALUE)
        return reshaped_numpy_arr
    elif isinstance(pd_series[0], np.ndarray):
        reshape_err_msg = (
            f"The value in the Input DataFrame column '{name}' could not be converted to the "
            f"expected shape of: '{tensor_spec.shape}'. Ensure that each of the input numpy "
            "array elements are of uniform length and can be reshaped to above expected shape."
        )
        try:
            # Because numpy array includes precise type information, so we don't convert type
            # here, so that in following schema validation we can have strict type check on
            # numpy array column.
            reshaped_numpy_arr = np.vstack(pd_series.tolist()).reshape(tensor_spec.shape)
        except ValueError:
            raise MlflowException(reshape_err_msg, error_code=INVALID_PARAMETER_VALUE)
        # Reshaping must not change the number of rows (batch size).
        if len(reshaped_numpy_arr) != len(pd_series):
            raise MlflowException(reshape_err_msg, error_code=INVALID_PARAMETER_VALUE)
        return reshaped_numpy_arr
    else:
        raise MlflowException(
            "Because the model signature requires tensor spec input, the input "
            "pandas dataframe values should be either scalar value, python list "
            "containing scalar values or numpy array containing scalar values, "
            "other types are not supported.",
            error_code=INVALID_PARAMETER_VALUE,
        )
def _enforce_tensor_schema(pf_input: PyFuncInput, input_schema: Schema):
    """
    Enforce the input tensor(s) conforms to the model's tensor-based signature.

    Named tensor specs accept a dict of name -> numpy array or a pandas
    DataFrame keyed by input name; a single unnamed spec accepts a DataFrame,
    numpy array, or scipy CSR/CSC sparse matrix.
    """

    def _is_sparse_matrix(x):
        if not HAS_SCIPY:
            # we can safely assume that it's not a sparse matrix if scipy is not installed
            return False
        return isinstance(x, (csr_matrix, csc_matrix))

    if input_schema.has_input_names():
        if isinstance(pf_input, dict):
            new_pf_input = {}
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                if not isinstance(pf_input[col_name], np.ndarray):
                    raise MlflowException(
                        "This model contains a tensor-based model signature with input names,"
                        " which suggests a dictionary input mapping input name to a numpy"
                        f" array, but a dict with value type {type(pf_input[col_name])} was found.",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                new_pf_input[col_name] = _enforce_tensor_spec(pf_input[col_name], tensor_spec)
        elif isinstance(pf_input, pd.DataFrame):
            # Each DataFrame column holds a flattened representation of one tensor input.
            new_pf_input = {}
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                pd_series = pf_input[col_name]
                new_pf_input[col_name] = _reshape_and_cast_pandas_column_values(
                    col_name, pd_series, tensor_spec
                )
        else:
            raise MlflowException(
                "This model contains a tensor-based model signature with input names, which"
                " suggests a dictionary input mapping input name to tensor, or a pandas"
                " DataFrame input containing columns mapping input name to flattened list value"
                f" from tensor, but an input of type {type(pf_input)} was found.",
                error_code=INVALID_PARAMETER_VALUE,
            )
    else:
        # Unnamed signature: exactly one tensor spec.
        tensor_spec = input_schema.inputs[0]
        if isinstance(pf_input, pd.DataFrame):
            num_input_columns = len(pf_input.columns)
            if pf_input.empty:
                raise MlflowException("Input DataFrame is empty.")
            elif num_input_columns == 1:
                new_pf_input = _reshape_and_cast_pandas_column_values(
                    None, pf_input[pf_input.columns[0]], tensor_spec
                )
            else:
                # Multi-column DataFrame maps row-wise onto a (-1, n_columns) tensor.
                if tensor_spec.shape != (-1, num_input_columns):
                    raise MlflowException(
                        "This model contains a model signature with an unnamed input. Since the "
                        "input data is a pandas DataFrame containing multiple columns, "
                        "the input shape must be of the structure "
                        "(-1, number_of_dataframe_columns). "
                        f"Instead, the input DataFrame passed had {num_input_columns} columns and "
                        f"an input shape of {tensor_spec.shape} with all values within the "
                        "DataFrame of scalar type. Please adjust the passed in DataFrame to "
                        "match the expected structure",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                new_pf_input = _enforce_tensor_spec(pf_input.to_numpy(), tensor_spec)
        elif isinstance(pf_input, np.ndarray) or _is_sparse_matrix(pf_input):
            new_pf_input = _enforce_tensor_spec(pf_input, tensor_spec)
        else:
            raise MlflowException(
                "This model contains a tensor-based model signature with no input names,"
                " which suggests a numpy array input or a pandas dataframe input with"
                f" proper column values, but an input of type {type(pf_input)} was found.",
                error_code=INVALID_PARAMETER_VALUE,
            )
    return new_pf_input
def _enforce_schema(pf_input: PyFuncInput, input_schema: Schema, flavor: str | None = None):
    """
    Enforces the provided input matches the model's input schema,

    For signatures with input names, we check there are no missing inputs and reorder the inputs to
    match the ordering declared in schema if necessary. Any extra columns are ignored.

    For column-based signatures, we make sure the types of the input match the type specified in
    the schema or if it can be safely converted to match the input schema.

    For Pyspark DataFrame inputs, MLflow casts a sample of the PySpark DataFrame into a Pandas
    DataFrame. MLflow will only enforce the schema on a subset of the data rows.

    For tensor-based signatures, we make sure the shape and type of the input matches the shape
    and type specified in model's input schema.
    """

    def _is_scalar(x):
        return np.isscalar(x) or x is None

    # Keep a handle to the untouched input: PySpark enforcement needs the original frame.
    original_pf_input = pf_input
    if isinstance(pf_input, pd.Series):
        pf_input = pd.DataFrame(pf_input)
    if not input_schema.is_tensor_spec():
        # Column-based signatures: normalize every supported input shape to a DataFrame.
        # convert single DataType to pandas DataFrame
        if np.isscalar(pf_input):
            pf_input = pd.DataFrame([pf_input])
        elif isinstance(pf_input, dict):
            # keys are column names
            if any(
                isinstance(col_spec.type, (Array, Object)) for col_spec in input_schema.inputs
            ) or all(
                _is_scalar(value)
                or (isinstance(value, list) and all(isinstance(item, str) for item in value))
                for value in pf_input.values()
            ):
                pf_input = pd.DataFrame([pf_input])
            else:
                try:
                    # This check is specifically to handle the serving structural cast for
                    # certain inputs for the transformers implementation. Due to the fact that
                    # specific Pipeline types in transformers support passing input data
                    # of the form Dict[str, str] in which the value is a scalar string, model
                    # serving will cast this entry as a numpy array with shape () and size 1.
                    # This is seen as a scalar input when attempting to create a Pandas
                    # DataFrame from such a numpy structure and requires the array to be
                    # encapsulated in a list in order to prevent a ValueError exception for
                    # requiring an index if passing in all scalar values thrown by Pandas.
                    if all(
                        isinstance(value, np.ndarray)
                        and value.dtype.type == np.str_
                        and value.size == 1
                        and value.shape == ()
                        for value in pf_input.values()
                    ):
                        pf_input = pd.DataFrame([pf_input])
                    elif any(
                        isinstance(value, np.ndarray) and value.ndim > 1
                        for value in pf_input.values()
                    ):
                        # Pandas DataFrames can't be constructed with embedded multi-dimensional
                        # numpy arrays. Accordingly, we convert any multi-dimensional numpy
                        # arrays to lists before constructing a DataFrame. This is safe because
                        # ColSpec model signatures do not support array columns, so subsequent
                        # validation logic will result in a clear "incompatible input types"
                        # exception. This is preferable to a pandas DataFrame construction error
                        pf_input = pd.DataFrame(
                            {
                                key: (
                                    value.tolist()
                                    if (isinstance(value, np.ndarray) and value.ndim > 1)
                                    else value
                                )
                                for key, value in pf_input.items()
                            }
                        )
                    else:
                        pf_input = pd.DataFrame(pf_input)
                except Exception as e:
                    raise MlflowException(
                        "This model contains a column-based signature, which suggests a DataFrame"
                        " input. There was an error casting the input data to a DataFrame:"
                        f" {e}"
                    )
        elif isinstance(pf_input, (list, np.ndarray, pd.Series)):
            pf_input = pd.DataFrame(pf_input)
        elif HAS_PYSPARK and isinstance(pf_input, SparkDataFrame):
            # Only a sample of rows is converted; schema is enforced on that subset.
            pf_input = pf_input.limit(10).toPandas()
            for field in original_pf_input.schema.fields:
                if isinstance(field.dataType, (StructType, ArrayType)):
                    pf_input[field.name] = pf_input[field.name].apply(
                        lambda row: convert_complex_types_pyspark_to_pandas(row, field.dataType)
                    )
        if not isinstance(pf_input, pd.DataFrame):
            raise MlflowException(
                f"Expected input to be DataFrame. Found: {type(pf_input).__name__}"
            )

    if input_schema.has_input_names():
        # Named inputs: verify required columns are present; warn on (and ignore) extras.
        # make sure there are no missing columns
        input_names = input_schema.required_input_names()
        optional_names = input_schema.optional_input_names()
        expected_required_cols = set(input_names)
        actual_cols = set()
        optional_cols = set(optional_names)
        if len(expected_required_cols) == 1 and isinstance(pf_input, np.ndarray):
            # for schemas with a single column, match input with column
            pf_input = {input_names[0]: pf_input}
            actual_cols = expected_required_cols
        elif isinstance(pf_input, pd.DataFrame):
            actual_cols = set(pf_input.columns)
        elif isinstance(pf_input, dict):
            actual_cols = set(pf_input.keys())
        missing_cols = expected_required_cols - actual_cols
        extra_cols = actual_cols - expected_required_cols - optional_cols
        # Preserve order from the original columns, since missing/extra columns are likely to
        # be in same order.
        missing_cols = [c for c in input_names if c in missing_cols]
        extra_cols = [c for c in actual_cols if c in extra_cols]
        if missing_cols:
            # If the user has set MLFLOW_DISABLE_SCHEMA_DETAILS to true, we raise a generic error
            if MLFLOW_DISABLE_SCHEMA_DETAILS.get():
                message = "Input schema validation failed. Mismatched or missing input(s)."
                if extra_cols:
                    message += " Note that there were extra inputs provided."
            else:
                message = f"Model is missing inputs {missing_cols}."
                if extra_cols:
                    message += f" Note that there were extra inputs: {extra_cols}."
            raise MlflowException(message)
        if extra_cols:
            _logger.warning(
                "Found extra inputs in the model input that are not defined in the model "
                f"signature: `{extra_cols}`. These inputs will be ignored."
            )
    elif not input_schema.is_tensor_spec():
        # The model signature does not specify column names => we can only verify column count.
        num_actual_columns = len(pf_input.columns)
        if num_actual_columns < len(input_schema.inputs):
            raise MlflowException(
                "Model inference is missing inputs. The model signature declares "
                "{} inputs but the provided value only has "
                "{} inputs. Note: the inputs were not named in the signature so we can "
                "only verify their count.".format(len(input_schema.inputs), num_actual_columns)
            )
    # Dispatch to the appropriate enforcement routine for the signature kind.
    if input_schema.is_tensor_spec():
        return _enforce_tensor_schema(pf_input, input_schema)
    elif HAS_PYSPARK and isinstance(original_pf_input, SparkDataFrame):
        return _enforce_pyspark_dataframe_schema(
            original_pf_input, pf_input, input_schema, flavor=flavor
        )
    else:
        # pf_input must be a pandas Dataframe at this point
        return (
            _enforce_named_col_schema(pf_input, input_schema)
            if input_schema.has_input_names()
            else _enforce_unnamed_col_schema(pf_input, input_schema)
        )
def _enforce_pyspark_dataframe_schema(
    original_pf_input: SparkDataFrame,
    pf_input_as_pandas,
    input_schema: Schema,
    flavor: str | None = None,
):
    """
    Enforce that the input PySpark DataFrame conforms to the model's input schema.

    This function creates a new DataFrame that only includes the columns from the original
    DataFrame that are declared in the model's input schema. Any extra columns in the original
    DataFrame are dropped.Note that this function does not modify the original DataFrame.

    Args:
        original_pf_input: Original input PySpark DataFrame.
        pf_input_as_pandas: Input DataFrame converted to pandas.
        input_schema: Expected schema of the input DataFrame.
        flavor: Optional model flavor. If specified, it is used to handle specific behaviors
            for different model flavors. Currently, only the '_FEATURE_STORE_FLAVOR' is
            handled specially.

    Returns:
        New PySpark DataFrame that conforms to the model's input schema.
    """
    if not HAS_PYSPARK:
        raise MlflowException("PySpark is not installed. Cannot handle a PySpark DataFrame.")
    new_pf_input = original_pf_input.alias("pf_input_copy")
    # Validate types on the pandas sample; the result is discarded, only the
    # pared-down Spark DataFrame is returned.
    if input_schema.has_input_names():
        _enforce_named_col_schema(pf_input_as_pandas, input_schema)
        input_names = input_schema.input_names()
    else:
        _enforce_unnamed_col_schema(pf_input_as_pandas, input_schema)
        input_names = pf_input_as_pandas.columns[: len(input_schema.inputs)]
    columns_to_drop = []
    columns_not_dropped_for_feature_store_model = []
    for col, dtype in new_pf_input.dtypes:
        if col not in input_names:
            # to support backwards compatibility with feature store models
            if any(x in dtype for x in ["array", "map", "struct"]):
                if flavor == _FEATURE_STORE_FLAVOR:
                    columns_not_dropped_for_feature_store_model.append(col)
                    continue
            columns_to_drop.append(col)
    if columns_not_dropped_for_feature_store_model:
        _logger.warning(
            "The following columns are not in the model signature but "
            "are not dropped for feature store model: %s",
            ", ".join(columns_not_dropped_for_feature_store_model),
        )
    return new_pf_input.drop(*columns_to_drop)
def _enforce_datatype(data: Any, dtype: DataType, required=True):
    """Validate a scalar value against ``dtype`` and return the converted scalar."""
    # Optional fields tolerate missing values.
    if not required and _is_none_or_nan(data):
        return None
    if not isinstance(dtype, DataType):
        raise MlflowException(f"Expected dtype to be DataType, got {type(dtype).__name__}")
    if not np.isscalar(data):
        raise MlflowException(f"Expected data to be scalar, got {type(data).__name__}")
    # Reuse logic in _enforce_mlflow_datatype for type conversion by wrapping the
    # scalar in a single-element pandas Series.
    wrapped = pd.Series(data)
    try:
        converted = _enforce_mlflow_datatype("", wrapped, dtype)
    except MlflowException:
        raise MlflowException(
            f"Failed to enforce schema of data `{data}` with dtype `{dtype.name}`"
        )
    return converted[0]
def _enforce_array(data: Any, arr: Array, required: bool = True):
    """
    Enforce data against an Array type.

    If the field is required, then the data must be provided.
    If Array's internal dtype is AnyType, then None and empty lists are also accepted.
    The output container type (list vs. numpy array) matches the input's.
    """
    if not required or isinstance(arr.dtype, AnyType):
        if data is None or (isinstance(data, (list, np.ndarray)) and len(data) == 0):
            return data
    if not isinstance(data, (list, np.ndarray)):
        raise MlflowException(f"Expected data to be list or numpy array, got {type(data).__name__}")
    if isinstance(arr.dtype, DataType):
        # TODO: this is still significantly slower than direct np.asarray dtype conversion
        # pd.Series conversion can be removed once we support direct validation on the numpy array
        data_enforced = (
            _enforce_mlflow_datatype("", pd.Series(data), arr.dtype).to_numpy(
                dtype=arr.dtype.to_numpy()
            )
            if len(data) > 0
            else data
        )
    else:
        # Nested Object/Array/Map element types are validated element-wise.
        data_enforced = [_enforce_type(x, arr.dtype, required=required) for x in data]

    # Keep the output container type consistent with the input container type.
    if isinstance(data, list) and isinstance(data_enforced, np.ndarray):
        data_enforced = data_enforced.tolist()
    elif isinstance(data, np.ndarray) and isinstance(data_enforced, list):
        data_enforced = np.array(data_enforced)

    return data_enforced
def _enforce_property(data: Any, property: Property):
    """Enforce ``data`` against one Object property, honoring its required flag."""
    return _enforce_type(data, property.dtype, required=property.required)
def _enforce_object(data: dict[str, Any], obj: Object, required: bool = True):
    """
    Enforce a dictionary against an Object type, validating each property.

    Mutates and returns ``data`` with property values converted in place.
    Raises MlflowException on missing required properties, undeclared
    properties, or per-property type failures.
    """
    if HAS_PYSPARK and isinstance(data, Row):
        # Recursively convert a Spark Row into a plain dict for validation.
        data = None if len(data) == 0 else data.asDict(True)
    if not required and (data is None or data == {}):
        return data
    if not isinstance(data, dict):
        raise MlflowException(
            f"Failed to enforce schema of '{data}' with type '{obj}'. "
            f"Expected data to be dictionary, got {type(data).__name__}"
        )
    if not isinstance(obj, Object):
        raise MlflowException(
            f"Failed to enforce schema of '{data}' with type '{obj}'. "
            f"Expected obj to be Object, got {type(obj).__name__}"
        )
    properties = {prop.name: prop for prop in obj.properties}
    required_props = {k for k, prop in properties.items() if prop.required}
    if missing_props := required_props - set(data.keys()):
        raise MlflowException(f"Missing required properties: {missing_props}")
    if invalid_props := data.keys() - properties.keys():
        raise MlflowException(
            f"Invalid properties not defined in the schema found: {invalid_props}"
        )
    for k, v in data.items():
        try:
            data[k] = _enforce_property(v, properties[k])
        except MlflowException as e:
            raise MlflowException(
                f"Failed to enforce schema for key `{k}`. "
                f"Expected type {properties[k].to_dict()[k]['type']}, "
                f"received type {type(v).__name__}"
            ) from e
    return data
def _enforce_map(data: Any, map_type: Map, required: bool = True):
    """Enforce ``data`` against a Map type: string keys, values of the declared type."""
    # Optional maps (or maps whose values are AnyType) may be absent or empty.
    may_be_empty = not required or isinstance(map_type.value_type, AnyType)
    if may_be_empty and (data is None or data == {}):
        return data
    if not isinstance(data, dict):
        raise MlflowException(f"Expected data to be a dict, got {type(data).__name__}")
    if any(not isinstance(key, str) for key in data):
        raise MlflowException("Expected all keys in the map type data are string type.")
    value_type = map_type.value_type
    return {key: _enforce_type(val, value_type, required=required) for key, val in data.items()}
def _enforce_type(data: Any, data_type: DataType | Array | Object | Map, required=True):
    """
    Dispatch schema enforcement based on the kind of ``data_type``.

    AnyType passes data through unchanged; unknown types raise.
    """
    if isinstance(data_type, DataType):
        return _enforce_datatype(data, data_type, required=required)
    if isinstance(data_type, Array):
        return _enforce_array(data, data_type, required=required)
    if isinstance(data_type, Object):
        return _enforce_object(data, data_type, required=required)
    if isinstance(data_type, Map):
        return _enforce_map(data, data_type, required=required)
    if isinstance(data_type, AnyType):
        return data
    raise MlflowException(f"Invalid data type: {data_type!r}")
def validate_schema(data: PyFuncInput, expected_schema: Schema) -> None:
    """
    Validate that the input data has the expected schema.

    Args:
        data: Input data to be validated. Supported types are:

            - pandas.DataFrame
            - pandas.Series
            - numpy.ndarray
            - scipy.sparse.csc_matrix
            - scipy.sparse.csr_matrix
            - List[Any]
            - Dict[str, Any]
            - str

        expected_schema: Expected Schema of the input data.

    Raises:
        mlflow.exceptions.MlflowException: when the input data does not match the schema.

    .. code-block:: python
        :caption: Example usage of validate_schema

        import mlflow.models

        # Suppose you've already got a model_uri
        model_info = mlflow.models.get_model_info(model_uri)
        # Get model signature directly
        model_signature = model_info.signature
        # validate schema
        mlflow.models.validate_schema(input_data, model_signature.inputs)
    """
    # Raises on mismatch; the enforced/converted result is intentionally discarded.
    _enforce_schema(data, expected_schema)
def add_libraries_to_model(model_uri, run_id=None, registered_model_name=None):
    """
    Given a registered model_uri (e.g. models:/<model_name>/<model_version>), this utility
    re-logs the model along with all the required model libraries back to the Model Registry.
    The required model libraries are stored along with the model as model artifacts. In
    addition, supporting files to the model (e.g. conda.yaml, requirements.txt) are modified
    to use the added libraries.

    By default, this utility creates a new model version under the same registered model specified
    by ``model_uri``. This behavior can be overridden by specifying the ``registered_model_name``
    argument.

    Args:
        model_uri: A registered model uri in the Model Registry of the form
            models:/<model_name>/<model_version/stage/latest>
        run_id: The ID of the run to which the model with libraries is logged. If None, the model
            with libraries is logged to the source run corresponding to model version
            specified by ``model_uri``; if the model version does not have a source run, a
            new run created.
        registered_model_name: The new model version (model with its libraries) is
            registered under the inputted registered_model_name. If None, a
            new version is logged to the existing model in the Model Registry.

    .. note::
        This utility only operates on a model that has been registered to the Model Registry.

    .. note::
        The libraries are only compatible with the platform on which they are added. Cross platform
        libraries are not supported.

    .. code-block:: python
        :caption: Example

        # Create and log a model to the Model Registry
        import pandas as pd
        from sklearn import datasets
        from sklearn.ensemble import RandomForestClassifier
        import mlflow
        import mlflow.sklearn
        from mlflow.models import infer_signature

        with mlflow.start_run():
            iris = datasets.load_iris()
            iris_train = pd.DataFrame(iris.data, columns=iris.feature_names)
            clf = RandomForestClassifier(max_depth=7, random_state=0)
            clf.fit(iris_train, iris.target)
            signature = infer_signature(iris_train, clf.predict(iris_train))
            mlflow.sklearn.log_model(
                clf,
                name="iris_rf",
                signature=signature,
                registered_model_name="model-with-libs",
            )

        # model uri for the above model
        model_uri = "models:/model-with-libs/1"

        # Import utility
        from mlflow.models.utils import add_libraries_to_model

        # Log libraries to the original run of the model
        add_libraries_to_model(model_uri)

        # Log libraries to some run_id
        existing_run_id = "21df94e6bdef4631a9d9cb56f211767f"
        add_libraries_to_model(model_uri, run_id=existing_run_id)

        # Log libraries to a new run
        with mlflow.start_run():
            add_libraries_to_model(model_uri)

        # Log libraries to a new registered model named 'new-model'
        with mlflow.start_run():
            add_libraries_to_model(model_uri, registered_model_name="new-model")
    """
    import mlflow
    from mlflow.models.wheeled_model import WheeledModel

    if mlflow.active_run() is None:
        if run_id is None:
            # Default to the source run of the referenced model version.
            run_id = get_model_version_from_model_uri(model_uri).run_id
        with mlflow.start_run(run_id):
            return WheeledModel.log_model(model_uri, registered_model_name)
    else:
        # Reuse the caller's active run.
        return WheeledModel.log_model(model_uri, registered_model_name)
def get_model_version_from_model_uri(model_uri):
    """
    Helper function to fetch a model version from a model uri of the form
    models:/<model_name>/<model_version/stage/latest>.
    """
    import mlflow
    from mlflow import MlflowClient

    # Prefer a profile embedded in the artifact URI; otherwise use the tracking
    # environment's registry URI.
    registry_uri = get_databricks_profile_uri_from_artifact_uri(model_uri)
    if not registry_uri:
        registry_uri = mlflow.get_registry_uri()
    client = MlflowClient(registry_uri=registry_uri)
    name, version = get_model_name_and_version(client, model_uri)
    return client.get_model_version(name, version)
def _enforce_params_schema(params: dict[str, Any] | None, schema: ParamSchema | None):
    """
    Validate inference-time ``params`` against the model's params schema.

    Returns a new dict containing every declared param: provided values are
    validated/converted via ParamSpec, undeclared keys are dropped with a
    warning, and missing params are filled with their schema defaults. When
    ``schema`` is None, any provided params are ignored (with a warning).
    """
    if schema is None:
        if params in [None, {}]:
            return params
        params_info = (
            f"Ignoring provided params: {list(params.keys())}"
            if isinstance(params, dict)
            else "Ignoring invalid params (not a dictionary)."
        )
        _logger.warning(
            "`params` can only be specified at inference time if the model signature "
            f"defines a params schema. This model does not define a params schema. {params_info}",
        )
        return {}
    params = {} if params is None else params
    if not isinstance(params, dict):
        raise MlflowException.invalid_parameter_value(
            f"Parameters must be a dictionary. Got type '{type(params).__name__}'.",
        )
    if not isinstance(schema, ParamSchema):
        raise MlflowException.invalid_parameter_value(
            "Parameters schema must be an instance of ParamSchema. "
            f"Got type '{type(schema).__name__}'.",
        )
    if any(not isinstance(k, str) for k in params.keys()):
        _logger.warning(
            "Keys in parameters should be of type `str`, but received non-string keys."
            "Converting all keys to string..."
        )
        params = {str(k): v for k, v in params.items()}

    allowed_keys = {param.name for param in schema.params}
    if ignored_keys := set(params) - allowed_keys:
        _logger.warning(
            f"Unrecognized params {list(ignored_keys)} are ignored for inference. "
            f"Supported params are: {allowed_keys}. "
            "To enable them, please add corresponding schema in ModelSignature."
        )
    params = {k: params[k] for k in params if k in allowed_keys}

    # Validate each declared param; collect all failures so they can be reported together.
    invalid_params = set()
    for param_spec in schema.params:
        if param_spec.name in params:
            try:
                params[param_spec.name] = ParamSpec.validate_param_spec(
                    params[param_spec.name], param_spec
                )
            except MlflowException as e:
                invalid_params.add((param_spec.name, e.message))
        else:
            params[param_spec.name] = param_spec.default

    if invalid_params:
        raise MlflowException.invalid_parameter_value(
            f"Invalid parameters found: {invalid_params!r}",
        )

    return params
def convert_complex_types_pyspark_to_pandas(value, dataType):
    """
    Recursively convert a PySpark value into numpy/pandas-friendly types.

    This function is needed because the default `asDict` function in PySpark
    converts the data to Python types, which is not compatible with the schema
    enforcement.
    """
    if value is None:
        return None
    if isinstance(dataType, StructType):
        # Convert every struct field recursively, keyed by field name.
        return {
            f.name: convert_complex_types_pyspark_to_pandas(value[f.name], f.dataType)
            for f in dataType.fields
        }
    if isinstance(dataType, ArrayType):
        element_type = dataType.elementType
        return [convert_complex_types_pyspark_to_pandas(item, element_type) for item in value]
    # Scalar leaf types: map Spark types onto the numpy/string forms expected by
    # schema enforcement; anything unlisted passes through unchanged.
    scalar_converters = {
        IntegerType: lambda v: np.int32(v),
        ShortType: lambda v: np.int16(v),
        FloatType: lambda v: np.float32(v),
        DateType: lambda v: v.strftime("%Y-%m-%d"),
        TimestampType: lambda v: v.strftime("%Y-%m-%d %H:%M:%S.%f"),
        BinaryType: lambda v: np.bytes_(v),
    }
    converter = scalar_converters.get(type(dataType))
    return converter(value) if converter else value
def _is_in_comment(line, start):
"""
Check if the code at the index "start" of the line is in a comment.
Limitations: This function does not handle multi-line comments, and the # symbol could be in a
string, or otherwise not indicate a comment.
"""
return "#" in line[:start]
def _is_in_string_only(line, search_string):
"""
Check is the search_string
Limitations: This function does not handle multi-line strings.
"""
# Regex for matching double quotes and everything inside
double_quotes_regex = r"\"(\\.|[^\"])*\""
# Regex for matching single quotes and everything inside
single_quotes_regex = r"\'(\\.|[^\'])*\'"
# Regex for matching search_string exactly
search_string_regex = rf"({re.escape(search_string)})"
# Concatenate the patterns using the OR operator '|'
# This will matches left to right - on quotes first, search_string last
pattern = double_quotes_regex + r"|" + single_quotes_regex + r"|" + search_string_regex
# Iterate through all matches in the line
for match in re.finditer(pattern, line):
# If the regex matched on the search_string, we know that it did not match in quotes since
# that is the order. So we know that the search_string exists outside of quotes
# (at least once).
if match.group() == search_string:
return False
return True
def _validate_model_code_from_notebook(code):
    """
    Validate there isn't any code that would work in a notebook but not as exported Python file.
    For now, this checks for dbutils and magic commands.
    """
    processed_lines = []
    for line in code.splitlines():
        # Warn about real uses of dbutils (i.e. not inside a comment or a string literal).
        for match in re.finditer(r"\bdbutils\b", line):
            start = match.start()
            if not _is_in_comment(line, start) and not _is_in_string_only(line, "dbutils"):
                _logger.warning(
                    "The model file uses 'dbutils' commands which are not supported. To ensure "
                    "your code functions correctly, make sure that it does not rely on these "
                    "dbutils commands for correctness."
                )
        # Prefix any line containing MAGIC commands with a comment. When there is better support
        # for the Databricks workspace export API, we can get rid of this.
        processed_lines.append("# MAGIC " + line if line.startswith("%") else line)

    output_code = "\n".join(processed_lines)

    magic_regex = r"^# MAGIC %((?!pip)\S+).*"
    if re.search(magic_regex, output_code, re.MULTILINE):
        _logger.warning(
            "The model file uses magic commands which have been commented out. To ensure your code "
            "functions correctly, make sure that it does not rely on these magic commands for "
            "correctness."
        )
    return output_code.encode("utf-8")
def _convert_llm_ndarray_to_list(data):
"""
Convert numpy array in the input data to list, because numpy array is not json serializable.
"""
if isinstance(data, np.ndarray):
return data.tolist()
if isinstance(data, list):
return [_convert_llm_ndarray_to_list(d) for d in data]
if isinstance(data, dict):
return {k: _convert_llm_ndarray_to_list(v) for k, v in data.items()}
# scalar values are also converted to numpy types, but they are
# not acceptable by the model
if np.isscalar(data) and isinstance(data, np.generic):
return data.item()
return data
def _convert_llm_input_data(data: Any) -> list[Any] | dict[str, Any]:
    """
    Normalize a single request payload for GenAI flavors such as LangChain and
    LlamaIndex.

    Args
        data: The payload to convert. Assumed to be a single request, but it may
            arrive in any shape -- a scalar, a dictionary, a one-element list, a
            Pandas DataFrame, etc.
    """
    if not isinstance(data, pd.DataFrame):
        return _convert_llm_ndarray_to_list(data)
    # Schema enforcement (pyfunc / spark_udf with a model signature) converts the
    # input into a DataFrame; convert it back here. A lone column named 0 means
    # the original payload was a string or a list of strings.
    unwrapped = (
        data.to_dict("list")[0] if list(data.columns) == [0] else data.to_dict(orient="records")
    )
    return _convert_llm_ndarray_to_list(unwrapped)
def _databricks_path_exists(path: Path) -> bool:
    """Return True if *path* exists in the Databricks workspace, False otherwise."""
    # Outside a Databricks runtime there is no workspace to query.
    if not is_in_databricks_runtime():
        return False

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.errors import ResourceDoesNotExist

    client = WorkspaceClient()
    try:
        # get_status raises ResourceDoesNotExist when the path is absent.
        client.workspace.get_status(str(path))
    except ResourceDoesNotExist:
        return False
    return True
def _validate_and_get_model_code_path(model_code_path: str, temp_dir: str) -> str:
    """
    Validate model code path exists. When failing to open the model file on Databricks,
    creates a temp file in temp_dir and validate its contents if it's a notebook.

    Args:
        model_code_path: Path to the model code file (Python file or Databricks notebook).
        temp_dir: Directory in which an exported notebook is materialized, if needed.

    Returns:
        Either `model_code_path` or a temp file path with the contents of the notebook.

    Raises:
        MlflowException: If the path does not exist, or is neither a readable Python
            file nor an exportable Databricks notebook.
    """
    # If the path is not a absolute path then convert it
    model_code_path = Path(model_code_path).resolve()

    if not (model_code_path.exists() or _databricks_path_exists(model_code_path)):
        additional_message = (
            f" Perhaps you meant '{model_code_path}.py'?" if not model_code_path.suffix else ""
        )
        raise MlflowException.invalid_parameter_value(
            f"The provided model path '{model_code_path}' does not exist. "
            f"Ensure the file path is valid and try again.{additional_message}"
        )

    try:
        # If `model_code_path` points to a notebook on Databricks, this line throws either
        # a `FileNotFoundError` or an `OSError`. In this case, try to export the notebook as
        # a Python file.
        with open(model_code_path):
            pass
        return str(model_code_path)
    except Exception:
        pass

    try:
        from databricks.sdk import WorkspaceClient
        from databricks.sdk.service.workspace import ExportFormat

        w = WorkspaceClient()
        response = w.workspace.export(path=model_code_path, format=ExportFormat.SOURCE)
        decoded_content = base64.b64decode(response.content)
    except Exception:
        raise MlflowException.invalid_parameter_value(
            f"The provided model path '{model_code_path}' is not a valid Python file path or a "
            "Databricks Notebook file path containing the code for defining the chain "
            "instance. Ensure the file path is valid and try again."
        )

    # Write the *validated* notebook source. `_validate_model_code_from_notebook`
    # comments out magic-command lines (`%...` -> `# MAGIC %...`); discarding its
    # return value (the previous behavior) would leave invalid `%` lines in the
    # exported Python file.
    validated_content = _validate_model_code_from_notebook(decoded_content.decode("utf-8"))
    path = os.path.join(temp_dir, "model.py")
    with open(path, "wb") as f:
        f.write(validated_content)
    return path
@contextmanager
def _config_context(config: str | dict[str, Any] | None = None):
    """Temporarily install *config* as the active model config, clearing it on exit."""
    # Normalize None to "" so that, when the model is loaded, ModelConfig can
    # correctly detect whether a config was provided.
    _set_model_config("" if config is None else config)
    try:
        yield
    finally:
        _set_model_config(None)
| _Example |
python | scipy__scipy | scipy/stats/_sampling.py | {
"start": 12113,
"end": 12267
} | class ____:
def __init__(self, pdf, args):
self._pdf = lambda x: pdf(x, *args)
def pdf(self, x):
return self._pdf(x)
| CustomDistPINV |
python | celery__celery | t/smoke/operations/worker_restart.py | {
"start": 110,
"end": 1369
} | class ____:
"""Restarts a worker in different ways."""
class Method(Enum):
POOL_RESTART = auto()
DOCKER_RESTART_GRACEFULLY = auto()
DOCKER_RESTART_FORCE = auto()
def restart_worker(
self,
worker: CeleryTestWorker,
method: WorkerRestart.Method,
assertion: bool = True,
) -> None:
"""Restart a Celery worker.
Args:
worker (CeleryTestWorker): Worker to restart.
method (WorkerRestart.Method): The method to restart the worker.
assertion (bool, optional): Whether to assert the worker state after restart. Defaults to True.
"""
if method == WorkerRestart.Method.POOL_RESTART:
worker.app.control.pool_restart()
worker.container.reload()
if method == WorkerRestart.Method.DOCKER_RESTART_GRACEFULLY:
worker.restart()
if method == WorkerRestart.Method.DOCKER_RESTART_FORCE:
worker.restart(force=True)
if assertion:
assert worker.container.status == "running", (
f"Worker container should be in 'running' state after restart, "
f"but is in '{worker.container.status}' state instead."
)
| WorkerRestart |
python | getsentry__sentry | src/sentry/api/endpoints/project_transaction_threshold_override.py | {
"start": 778,
"end": 2179
} | class ____(serializers.Serializer):
transaction = serializers.CharField(required=True, max_length=200)
threshold = serializers.IntegerField(required=True, max_value=MAX_VALUE)
metric = serializers.CharField(required=True)
def validate_metric(self, metric):
for key, value in TRANSACTION_METRICS.items():
if value == metric:
return key
raise serializers.ValidationError(f"Invalid transaction metric - {metric}")
def validate_threshold(self, threshold):
if threshold % 100:
raise serializers.ValidationError("Invalid threshold - specify a multiple of 100")
return threshold
def validate(self, data):
data = super().validate(data)
organization = self.context.get("organization")
project = self.context.get("project")
count = (
ProjectTransactionThresholdOverride.objects.filter(
project=project, organization=organization
)
.exclude(transaction=data["transaction"])
.count()
)
if count >= MAX_TRANSACTION_THRESHOLDS_PER_PROJECT:
raise serializers.ValidationError(
f"At most {MAX_TRANSACTION_THRESHOLDS_PER_PROJECT} configured transaction thresholds per project."
)
return data
@region_silo_endpoint
| ProjectTransactionThresholdOverrideSerializer |
python | rq__rq | rq/worker.py | {
"start": 2908,
"end": 65525
} | class ____:
redis_worker_namespace_prefix = 'rq:worker:'
redis_workers_keys = worker_registration.REDIS_WORKER_KEYS
death_penalty_class = get_default_death_penalty_class()
queue_class = Queue
job_class = Job
# `log_result_lifespan` controls whether "Result is kept for XXX seconds"
# messages are logged after every job, by default they are.
log_result_lifespan = True
# `log_job_description` is used to toggle logging an entire jobs description.
log_job_description = True
# factor to increase connection_wait_time in case of continuous connection failures.
exponential_backoff_factor = 2.0
# Max Wait time (in seconds) after which exponential_backoff_factor won't be applicable.
max_connection_wait_time = 60.0
def __init__(
self,
queues: Sequence[Union[str, 'Queue']],
name: Optional[str] = None,
default_result_ttl=DEFAULT_RESULT_TTL,
connection: Optional['Redis'] = None,
exc_handler=None,
exception_handlers=None,
maintenance_interval: int = DEFAULT_MAINTENANCE_TASK_INTERVAL,
default_worker_ttl: Optional[int] = None, # TODO remove this arg in 3.0
worker_ttl: Optional[int] = None,
job_class: Optional[Union[type[Job], str]] = None,
queue_class: Optional[Union[type[Queue], str]] = None,
log_job_description: bool = True,
job_monitoring_interval=DEFAULT_JOB_MONITORING_INTERVAL,
disable_default_exception_handler: bool = False,
prepare_for_work: bool = True,
serializer: Optional[Union[Serializer, str]] = None,
work_horse_killed_handler: Optional[Callable[[Job, int, int, 'struct_rusage'], None]] = None,
): # noqa
self.default_result_ttl = default_result_ttl
if worker_ttl:
self.worker_ttl = worker_ttl
elif default_worker_ttl:
warnings.warn('default_worker_ttl is deprecated, use worker_ttl.', DeprecationWarning, stacklevel=2)
self.worker_ttl = default_worker_ttl
else:
self.worker_ttl = DEFAULT_WORKER_TTL
self.job_monitoring_interval = job_monitoring_interval
self.maintenance_interval = maintenance_interval
if not connection:
connection = get_connection_from_queues(queues)
assert connection
connection = self._set_connection(connection)
self.connection = connection
self.redis_server_version = None
if job_class:
self.job_class = import_job_class(job_class) if isinstance(job_class, str) else job_class
if queue_class:
self.queue_class = import_queue_class(queue_class) if isinstance(queue_class, str) else queue_class
self.version: str = VERSION
self.python_version: str = sys.version
self.serializer = resolve_serializer(serializer)
self.execution: Optional[Execution] = None
queues = [
(
self.queue_class(
name=q,
connection=connection,
job_class=self.job_class,
serializer=self.serializer,
death_penalty_class=self.death_penalty_class,
)
if isinstance(q, str)
else q
)
for q in ensure_job_list(queues)
]
self.name: str = name or uuid4().hex
self.queues: list[Queue] = queues
self.validate_queues()
self._ordered_queues = self.queues[:]
self._exc_handlers: list[Callable] = []
self._work_horse_killed_handler = work_horse_killed_handler
self._shutdown_requested_date: Optional[datetime] = None
self._state: str = 'starting'
self._is_horse: bool = False
self._horse_pid: int = 0
self._stop_requested: bool = False
self._stopped_job_id = None
self.log = logger
self.log_job_description = log_job_description
self.last_cleaned_at = None
self.successful_job_count: int = 0
self.failed_job_count: int = 0
self.total_working_time: float = 0
self.current_job_working_time: float = 0
self.birth_date = None
self.scheduler: Optional[RQScheduler] = None
self.pubsub: Optional[PubSub] = None
self.pubsub_thread = None
self._dequeue_strategy: Optional[DequeueStrategy] = DequeueStrategy.DEFAULT
self.disable_default_exception_handler = disable_default_exception_handler
if prepare_for_work:
self.hostname: Optional[str] = socket.gethostname()
self.pid: Optional[int] = os.getpid()
try:
connection.client_setname(self.name)
except redis.exceptions.ResponseError:
warnings.warn('CLIENT SETNAME command not supported, setting ip_address to unknown', Warning)
self.ip_address = 'unknown'
else:
client_addresses = [
client['addr'] for client in connection.client_list() if client.get('name') == self.name
]
if len(client_addresses) > 0:
self.ip_address = client_addresses[0]
else:
warnings.warn('CLIENT LIST command not supported, setting ip_address to unknown', Warning)
self.ip_address = 'unknown'
else:
self.hostname = None
self.pid = None
self.ip_address = 'unknown'
if isinstance(exception_handlers, (list, tuple)):
for handler in exception_handlers:
self.push_exc_handler(handler)
elif exception_handlers is not None:
self.push_exc_handler(exception_handlers)
@classmethod
def find_by_key(
cls,
worker_key: str,
connection: 'Redis',
job_class: Optional[type['Job']] = None,
queue_class: Optional[type['Queue']] = None,
serializer: Optional[Union[Serializer, str]] = None,
) -> Optional['BaseWorker']:
"""Returns a Worker instance, based on the naming conventions for
naming the internal Redis keys. Can be used to reverse-lookup Workers
by their Redis keys.
Args:
worker_key (str): The worker key
connection (Optional[Redis], optional): Redis connection. Defaults to None.
job_class (Optional[Type[Job]], optional): The job class if custom class is being used. Defaults to None.
queue_class (Optional[Type[Queue]]): The queue class if a custom class is being used. Defaults to None.
serializer (Optional[Union[Serializer, str]], optional): The serializer to use. Defaults to None.
Raises:
ValueError: If the key doesn't start with `rq:worker:`, the default worker namespace prefix.
Returns:
worker (Worker): The Worker instance.
"""
prefix = cls.redis_worker_namespace_prefix
if not worker_key.startswith(prefix):
raise ValueError('Not a valid RQ worker key: %s' % worker_key)
if not connection.exists(worker_key):
connection.srem(cls.redis_workers_keys, worker_key)
return None
name = worker_key[len(prefix) :]
worker = cls(
[],
name,
connection=connection,
job_class=job_class,
queue_class=queue_class,
prepare_for_work=False,
serializer=serializer,
)
worker.refresh()
return worker
@classmethod
def all(
cls,
connection: Optional['Redis'] = None,
job_class: Optional[type['Job']] = None,
queue_class: Optional[type['Queue']] = None,
queue: Optional['Queue'] = None,
serializer=None,
) -> list['BaseWorker']:
"""Returns an iterable of all Workers.
Returns:
workers (List[Worker]): A list of workers
"""
if queue:
connection = queue.connection
assert connection
worker_keys = worker_registration.get_keys(queue=queue, connection=connection)
workers = [
cls.find_by_key(
key, connection=connection, job_class=job_class, queue_class=queue_class, serializer=serializer
)
for key in worker_keys
]
return compact(workers)
@classmethod
def all_keys(cls, connection: Optional['Redis'] = None, queue: Optional['Queue'] = None) -> list[str]:
"""List of worker keys
Args:
connection (Optional[Redis], optional): A Redis Connection. Defaults to None.
queue (Optional[Queue], optional): The Queue. Defaults to None.
Returns:
list_keys (List[str]): A list of worker keys
"""
return [as_text(key) for key in worker_registration.get_keys(queue=queue, connection=connection)]
@classmethod
def count(cls, connection: Optional['Redis'] = None, queue: Optional['Queue'] = None) -> int:
"""Returns the number of workers by queue or connection.
Args:
connection (Optional[Redis], optional): Redis connection. Defaults to None.
queue (Optional[Queue], optional): The queue to use. Defaults to None.
Returns:
length (int): The queue length.
"""
return len(worker_registration.get_keys(queue=queue, connection=connection))
def refresh(self):
"""Refreshes the worker data.
It will get the data from the datastore and update the Worker's attributes
"""
raw_data = self.connection.hgetall(self.key)
if not raw_data:
return
data = decode_redis_hash(raw_data, decode_values=True)
self.hostname = data.get('hostname') or None
self.ip_address = data.get('ip_address') or None
self.pid = int(data['pid']) if data.get('pid') else None
self.version = data.get('version') or VERSION
self.python_version = data.get('python_version') or sys.version
self._state = data.get('state', '?')
self._job_id = data.get('current_job')
if data.get('last_heartbeat'):
self.last_heartbeat = utcparse(data['last_heartbeat'])
else:
self.last_heartbeat = None
if data.get('birth'):
self.birth_date = utcparse(data['birth'])
else:
self.birth_date = None
self.failed_job_count = int(data['failed_job_count']) if data.get('failed_job_count') else 0
self.successful_job_count = int(data['successful_job_count']) if data.get('successful_job_count') else 0
self.total_working_time = float(data['total_working_time']) if data.get('total_working_time') else 0
self.current_job_working_time = (
float(data['current_job_working_time']) if data.get('current_job_working_time') else 0
)
if data.get('queues'):
self.queues = [
self.queue_class(
queue, connection=self.connection, job_class=self.job_class, serializer=self.serializer
)
for queue in data['queues'].split(',')
]
@property
def should_run_maintenance_tasks(self):
"""Maintenance tasks should run on first startup or every 10 minutes."""
if self.last_cleaned_at is None:
return True
if (now() - self.last_cleaned_at) > timedelta(seconds=self.maintenance_interval):
return True
return False
def _set_connection(self, connection: 'Redis') -> 'Redis':
"""Configures the Redis connection's socket timeout.
This will timeout the connection in case any specific command hangs at any given time (eg. BLPOP), but
also ensures that the timeout is long enough for those operations.
If the connection provided already has an adequate `socket_timeout` defined, skips.
Args:
connection (Optional[Redis]): The Redis Connection.
"""
current_socket_timeout = connection.connection_pool.connection_kwargs.get('socket_timeout')
if current_socket_timeout is None or current_socket_timeout < self.connection_timeout:
timeout_config = {'socket_timeout': self.connection_timeout}
connection.connection_pool.connection_kwargs.update(timeout_config)
return connection
@property
def dequeue_timeout(self) -> int:
return max(1, self.worker_ttl - 15)
@property
def connection_timeout(self) -> int:
return self.dequeue_timeout + 10
def clean_registries(self):
"""Runs maintenance jobs on each Queue's registries."""
for queue in self.queues:
# If there are multiple workers running, we only want 1 worker
# to run clean_registries().
if queue.acquire_maintenance_lock():
self.log.info('Worker %s: cleaning registries for queue: %s', self.name, queue.name)
clean_registries(queue, self._exc_handlers)
worker_registration.clean_worker_registry(queue)
queue.intermediate_queue.cleanup(self, queue)
queue.release_maintenance_lock()
self.last_cleaned_at = now()
def get_redis_server_version(self):
"""Return Redis server version of connection"""
if not self.redis_server_version:
self.redis_server_version = get_version(self.connection)
return self.redis_server_version
def validate_queues(self):
"""Sanity check for the given queues."""
for queue in self.queues:
if not isinstance(queue, self.queue_class):
raise TypeError(f'{queue} is not of type {self.queue_class} or string types')
def queue_names(self) -> list[str]:
"""Returns the queue names of this worker's queues.
Returns:
List[str]: The queue names.
"""
return [queue.name for queue in self.queues]
def queue_keys(self) -> list[str]:
"""Returns the Redis keys representing this worker's queues.
Returns:
List[str]: The list of strings with queues keys
"""
return [queue.key for queue in self.queues]
@property
def key(self):
"""Returns the worker's Redis hash key."""
return self.redis_worker_namespace_prefix + self.name
@property
def pubsub_channel_name(self):
"""Returns the worker's Redis hash key."""
return PUBSUB_CHANNEL_TEMPLATE % self.name
def request_stop(self, signum, frame):
"""Stops the current worker loop but waits for child processes to
end gracefully (warm shutdown).
Args:
signum (Any): Signum
frame (Any): Frame
"""
self.log.debug('Worker %s: got signal %s', self.name, signal_name(signum))
self._shutdown_requested_date = now()
signal.signal(signal.SIGINT, self.request_force_stop)
signal.signal(signal.SIGTERM, self.request_force_stop)
self.handle_warm_shutdown_request()
self._shutdown()
def _shutdown(self):
"""
If shutdown is requested in the middle of a job, wait until
finish before shutting down and save the request in redis
"""
if self.get_state() == WorkerStatus.BUSY:
self._stop_requested = True
self.set_shutdown_requested_date()
self.log.debug(
'Worker %s: stopping after current horse is finished. Press Ctrl+C again for a cold shutdown.',
self.name,
)
if self.scheduler:
self.stop_scheduler()
else:
if self.scheduler:
self.stop_scheduler()
raise StopRequested()
def request_force_stop(self, signum: int, frame: Optional[FrameType]):
"""Terminates the application (cold shutdown).
Args:
signum (int): Signal number
frame (Optional[FrameType]): Frame
Raises:
SystemExit: SystemExit
"""
# When worker is run through a worker pool, it may receive duplicate signals
# One is sent by the pool when it calls `pool.stop_worker()` and another is sent by the OS
# when user hits Ctrl+C. In this case if we receive the second signal within 1 second,
# we ignore it.
if (now() - self._shutdown_requested_date) < timedelta(seconds=1): # type: ignore
self.log.debug('Worker %s: shutdown signal ignored, received twice in less than 1 second', self.name)
return
self.log.warning('Worker %s: cold shut down', self.name)
# Take down the horse with the worker
if self.horse_pid:
self.log.debug('Worker %s: taking down horse %s with me', self.name, self.horse_pid)
self.kill_horse()
self.wait_for_horse()
raise SystemExit()
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM gracefully."""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def execute_job(self, job: 'Job', queue: 'Queue'):
"""To be implemented by subclasses."""
raise NotImplementedError
def work(
self,
burst: bool = False,
logging_level: Optional[str] = None,
date_format: str = DEFAULT_LOGGING_DATE_FORMAT,
log_format: str = DEFAULT_LOGGING_FORMAT,
max_jobs: Optional[int] = None,
max_idle_time: Optional[int] = None,
with_scheduler: bool = False,
dequeue_strategy: DequeueStrategy = DequeueStrategy.DEFAULT,
) -> bool:
"""Starts the work loop.
Pops and performs all jobs on the current list of queues. When all
queues are empty, block and wait for new jobs to arrive on any of the
queues, unless `burst` mode is enabled.
If `max_idle_time` is provided, worker will die when it's idle for more than the provided value.
The return value indicates whether any jobs were processed.
Args:
burst (bool, optional): Whether to work on burst mode. Defaults to False.
logging_level (Optional[str], optional): Logging level to use.
If not provided, defaults to "INFO" unless a class-level logging level is already set.
date_format (str, optional): Date Format. Defaults to DEFAULT_LOGGING_DATE_FORMAT.
log_format (str, optional): Log Format. Defaults to DEFAULT_LOGGING_FORMAT.
max_jobs (Optional[int], optional): Max number of jobs. Defaults to None.
max_idle_time (Optional[int], optional): Max seconds for worker to be idle. Defaults to None.
with_scheduler (bool, optional): Whether to run the scheduler in a separate process. Defaults to False.
dequeue_strategy (DequeueStrategy, optional): Which strategy to use to dequeue jobs.
Defaults to DequeueStrategy.DEFAULT
Returns:
worked (bool): Will return True if any job was processed, False otherwise.
"""
self.bootstrap(logging_level, date_format, log_format)
self._dequeue_strategy = dequeue_strategy
completed_jobs = 0
if with_scheduler:
self._start_scheduler(burst, logging_level, date_format, log_format)
self._install_signal_handlers()
try:
while True:
try:
self.check_for_suspension(burst)
if self.should_run_maintenance_tasks:
self.run_maintenance_tasks()
if self._stop_requested:
self.log.info('Worker %s: stopping on request', self.name)
break
timeout = None if burst else self.dequeue_timeout
result = self.dequeue_job_and_maintain_ttl(timeout, max_idle_time)
if result is None:
if burst:
self.log.info('Worker %s: done, quitting', self.name)
elif max_idle_time is not None:
self.log.info('Worker %s: idle for %d seconds, quitting', self.name, max_idle_time)
break
job, queue = result
self.execute_job(job, queue)
self.heartbeat()
completed_jobs += 1
if max_jobs is not None:
if completed_jobs >= max_jobs:
self.log.info('Worker %s: finished executing %d jobs, quitting', self.name, completed_jobs)
break
except redis.exceptions.TimeoutError:
self.log.error('Worker %s: Redis connection timeout, quitting...', self.name)
break
except StopRequested:
break
except SystemExit:
# Cold shutdown detected
raise
except: # noqa
self.log.error('Worker %s: found an unhandled exception, quitting...', self.name, exc_info=True)
break
finally:
self.teardown()
return bool(completed_jobs)
def cleanup_execution(self, job: 'Job', pipeline: 'Pipeline'):
"""Cleans up the execution of a job.
It will remove the job execution record from the `StartedJobRegistry` and delete the Execution object.
"""
self.log.debug('Cleaning up execution of job %s', job.id)
self.set_current_job_id(None, pipeline=pipeline)
if self.execution is not None:
self.execution.delete(job=job, pipeline=pipeline)
self.execution = None
def handle_warm_shutdown_request(self):
self.log.info('Worker %s [PID %d]: warm shut down requested', self.name, self.pid)
def reorder_queues(self, reference_queue: 'Queue'):
"""Reorder the queues according to the strategy.
As this can be defined both in the `Worker` initialization or in the `work` method,
it doesn't take the strategy directly, but rather uses the private `_dequeue_strategy` attribute.
Args:
reference_queue (Union[Queue, str]): The queues to reorder
"""
if self._dequeue_strategy is None:
self._dequeue_strategy = DequeueStrategy.DEFAULT
if self._dequeue_strategy not in ('default', 'random', 'round_robin'):
raise ValueError(
f'Dequeue strategy {self._dequeue_strategy} is not allowed. Use `default`, `random` or `round_robin`.'
)
if self._dequeue_strategy == DequeueStrategy.DEFAULT:
return
if self._dequeue_strategy == DequeueStrategy.ROUND_ROBIN:
pos = self._ordered_queues.index(reference_queue)
self._ordered_queues = self._ordered_queues[pos + 1 :] + self._ordered_queues[: pos + 1]
return
if self._dequeue_strategy == DequeueStrategy.RANDOM:
shuffle(self._ordered_queues)
return
def handle_job_failure(self, job: 'Job', queue: 'Queue', started_job_registry=None, exc_string=''):
"""
Handles the failure or an executing job by:
1. Setting the job status to failed
2. Removing the job from StartedJobRegistry
3. Setting the workers current job to None
4. Add the job to FailedJobRegistry
`save_exc_to_job` should only be used for testing purposes
"""
self.log.debug('Worker %s: handling failed execution of job %s', self.name, job.id)
with self.connection.pipeline() as pipeline:
if started_job_registry is None:
started_job_registry = StartedJobRegistry(
job.origin, self.connection, job_class=self.job_class, serializer=self.serializer
)
# check whether a job was stopped intentionally and set the job
# status appropriately if it was this job.
job_is_stopped = self._stopped_job_id == job.id
retry = job.should_retry and not job_is_stopped
if job_is_stopped:
job.set_status(JobStatus.STOPPED, pipeline=pipeline)
self._stopped_job_id = None
else:
# Requeue/reschedule if retry is configured, otherwise
if not retry:
job.set_status(JobStatus.FAILED, pipeline=pipeline)
self.cleanup_execution(job, pipeline=pipeline)
if not self.disable_default_exception_handler and not retry:
job._handle_failure(exc_string, pipeline=pipeline, worker_name=self.name)
with suppress(redis.exceptions.ConnectionError):
pipeline.execute()
self.increment_failed_job_count(pipeline)
if job.started_at and job.ended_at:
self.increment_total_working_time(job.ended_at - job.started_at, pipeline)
if retry:
job.retry(queue, pipeline)
enqueue_dependents = False
else:
enqueue_dependents = True
try:
pipeline.execute()
if enqueue_dependents:
queue.enqueue_dependents(job)
except Exception as e:
# Ensure that custom exception handlers are called
# even if Redis is down
self.log.error(
'Worker %s: exception during pipeline execute or enqueue_dependents for job %s: %s',
self.name,
job.id,
e,
)
pass
def set_current_job_working_time(self, current_job_working_time: float, pipeline: Optional['Pipeline'] = None):
"""Sets the current job working time in seconds
Args:
current_job_working_time (float): The current job working time in seconds
pipeline (Optional[Pipeline], optional): Pipeline to use. Defaults to None.
"""
self.current_job_working_time = current_job_working_time
connection = pipeline if pipeline is not None else self.connection
connection.hset(self.key, 'current_job_working_time', current_job_working_time)
def set_current_job_id(self, job_id: Optional[str] = None, pipeline: Optional['Pipeline'] = None):
"""Sets the current job id.
If `None` is used it will delete the current job key.
Args:
job_id (Optional[str], optional): The job id. Defaults to None.
pipeline (Optional[Pipeline], optional): The pipeline to use. Defaults to None.
"""
connection = pipeline if pipeline is not None else self.connection
if job_id is None:
connection.hdel(self.key, 'current_job')
else:
connection.hset(self.key, 'current_job', job_id)
def get_current_job_id(self, pipeline: Optional['Pipeline'] = None) -> Optional[str]:
"""Retrieves the current job id.
Args:
pipeline (Optional['Pipeline'], optional): The pipeline to use. Defaults to None.
Returns:
job_id (Optional[str): The job id
"""
connection = pipeline if pipeline is not None else self.connection
result = connection.hget(self.key, 'current_job')
if result is None:
return None
return as_text(result)
def get_current_job(self) -> Optional['Job']:
"""Returns the currently executing job instance.
Returns:
job (Job): The job instance.
"""
job_id = self.get_current_job_id()
if job_id is None:
return None
return self.job_class.fetch(job_id, self.connection, self.serializer)
def set_state(self, state: str, pipeline: Optional['Pipeline'] = None):
"""Sets the worker's state.
Args:
state (str): The state
pipeline (Optional[Pipeline], optional): The pipeline to use. Defaults to None.
"""
self._state = state
connection = pipeline if pipeline is not None else self.connection
connection.hset(self.key, 'state', state)
def _set_state(self, state):
"""Raise a DeprecationWarning if ``worker.state = X`` is used"""
warnings.warn('worker.state is deprecated, use worker.set_state() instead.', DeprecationWarning)
self.set_state(state)
def get_state(self) -> str:
return self._state
def _get_state(self):
"""Raise a DeprecationWarning if ``worker.state == X`` is used"""
warnings.warn('worker.state is deprecated, use worker.get_state() instead.', DeprecationWarning)
return self.get_state()
state = property(_get_state, _set_state)
def _start_scheduler(
self,
burst: bool = False,
logging_level: Optional[str] = 'INFO',
date_format: str = DEFAULT_LOGGING_DATE_FORMAT,
log_format: str = DEFAULT_LOGGING_FORMAT,
):
"""Starts the scheduler process.
This is specifically designed to be run by the worker when running the `work()` method.
Instantiates the RQScheduler and tries to acquire a lock.
If the lock is acquired, start scheduler.
If worker is on burst mode just enqueues scheduled jobs and quits,
otherwise, starts the scheduler in a separate process.
Args:
burst (bool, optional): Whether to work on burst mode. Defaults to False.
logging_level (str, optional): Logging level to use. Defaults to "INFO".
date_format (str, optional): Date Format. Defaults to DEFAULT_LOGGING_DATE_FORMAT.
log_format (str, optional): Log Format. Defaults to DEFAULT_LOGGING_FORMAT.
"""
self.scheduler = RQScheduler(
self.queues,
connection=self.connection,
logging_level=logging_level if logging_level is not None else self.log.level,
date_format=date_format,
log_format=log_format,
serializer=self.serializer,
)
self.scheduler.acquire_locks()
if self.scheduler.acquired_locks:
if burst:
self.scheduler.enqueue_scheduled_jobs()
self.scheduler.release_locks()
else:
self.scheduler.start()
def register_birth(self):
"""Registers its own birth."""
self.log.debug('Worker %s: registering birth', self.name)
if self.connection.exists(self.key) and not self.connection.hexists(self.key, 'death'):
msg = 'There exists an active worker named {0!r} already'
raise ValueError(msg.format(self.name))
key = self.key
queues = ','.join(self.queue_names())
with self.connection.pipeline() as p:
p.delete(key)
right_now = now()
now_in_string = utcformat(right_now)
self.birth_date = right_now
mapping = {
'birth': now_in_string,
'last_heartbeat': now_in_string,
'queues': queues,
'pid': self.pid,
'hostname': self.hostname,
'ip_address': self.ip_address,
'version': self.version,
'python_version': self.python_version,
}
p.hset(key, mapping=mapping)
worker_registration.register(self, p)
p.expire(key, self.worker_ttl + 60)
p.execute()
def register_death(self):
"""Registers its own death."""
self.log.debug('Worker %s: registering death', self.name)
with self.connection.pipeline() as p:
# We cannot use self.state = 'dead' here, because that would
# rollback the pipeline
worker_registration.unregister(self, p)
p.hset(self.key, 'death', utcformat(now()))
p.expire(self.key, 60)
p.execute()
@property
def horse_pid(self):
"""The horse's process ID. Only available in the worker. Will return
0 in the horse part of the fork.
"""
return self._horse_pid
def bootstrap(
self,
logging_level: Optional[str] = 'INFO',
date_format: str = DEFAULT_LOGGING_DATE_FORMAT,
log_format: str = DEFAULT_LOGGING_FORMAT,
):
"""Bootstraps the worker.
Runs the basic tasks that should run when the worker actually starts working.
Used so that new workers can focus on the work loop implementation rather
than the full bootstrapping process.
Args:
logging_level (str, optional): Logging level to use. Defaults to "INFO".
date_format (str, optional): Date Format. Defaults to DEFAULT_LOGGING_DATE_FORMAT.
log_format (str, optional): Log Format. Defaults to DEFAULT_LOGGING_FORMAT.
"""
setup_loghandlers(logging_level, date_format, log_format, name='rq.worker')
setup_loghandlers(logging_level, date_format, log_format, name='rq.job')
self.register_birth()
self.log.info('Worker %s: started with PID %d, version %s', self.name, os.getpid(), VERSION)
self.subscribe()
self.set_state(WorkerStatus.STARTED)
qnames = self.queue_names()
self.log.info('*** Listening on %s...', green(', '.join(qnames)))
    def check_for_suspension(self, burst: bool):
        """Check to see if workers have been suspended by `rq suspend`.

        Blocks (polling once per second) while the suspension flag is set in
        Redis, restoring the previous worker state once resumed. In burst
        mode a suspension means there is nothing left to do, so StopRequested
        is raised to exit the work loop.
        """
        before_state = None
        notified = False
        while not self._stop_requested and is_suspended(self.connection, self):
            if burst:
                self.log.info('Worker %s: suspended in burst mode, exiting', self.name)
                self.log.info('Worker %s: note: there could still be unfinished jobs on the queue', self.name)
                raise StopRequested
            if not notified:
                # Log and flip to SUSPENDED only once, on the first iteration.
                self.log.info('Worker %s: suspended, run `rq resume` to resume', self.name)
                before_state = self.get_state()
                self.set_state(WorkerStatus.SUSPENDED)
                notified = True
            time.sleep(1)
        if before_state:
            # Restore whatever state we were in before the suspension.
            self.set_state(before_state)
def procline(self, message):
"""Changes the current procname for the process.
This can be used to make `ps -ef` output more readable.
"""
setprocname(f'rq:worker:{self.name}: {message}')
def set_shutdown_requested_date(self):
"""Sets the date on which the worker received a (warm) shutdown request"""
self.connection.hset(self.key, 'shutdown_requested_date', utcformat(self._shutdown_requested_date))
@property
def shutdown_requested_date(self):
"""Fetches shutdown_requested_date from Redis."""
shutdown_requested_timestamp = self.connection.hget(self.key, 'shutdown_requested_date')
if shutdown_requested_timestamp is not None:
return utcparse(as_text(shutdown_requested_timestamp))
@property
def death_date(self):
"""Fetches death date from Redis."""
death_timestamp = self.connection.hget(self.key, 'death')
if death_timestamp is not None:
return utcparse(as_text(death_timestamp))
    def run_maintenance_tasks(self):
        """
        Runs periodic maintenance tasks, these include:
        1. Check if scheduler should be started. This check should not be run
           on first run since worker.work() already calls
           `scheduler.enqueue_scheduled_jobs()` on startup.
        2. Cleaning registries

        No need to try to start scheduler on first run
        """
        # last_cleaned_at is falsy on the very first run, so the scheduler
        # restart check is skipped then (work() already handled startup).
        if self.last_cleaned_at:
            # Restart the scheduler subprocess if it never started or died.
            if self.scheduler and (not self.scheduler._process or not self.scheduler._process.is_alive()):
                self.scheduler.acquire_locks(auto_start=True)
        self.clean_registries()
        Group.clean_registries(connection=self.connection)
    def _pubsub_exception_handler(self, exc: Exception, pubsub: 'PubSub', pubsub_thread: 'PubSubWorkerThread') -> None:
        """
        This exception handler allows the pubsub_thread to continue & retry to
        connect after a connection problem the same way the main worker loop
        indefinitely retries.

        redis-py internal mechanism will restore the channels subscriptions
        once the connection is re-established.
        """
        if isinstance(exc, (redis.exceptions.ConnectionError)):
            # Transient connection loss: log, back off briefly, and return so
            # the pubsub thread keeps running and retries.
            self.log.error(
                'Worker %s: could not connect to Redis instance: %s retrying in %d seconds...',
                self.name,
                exc,
                2,
            )
            time.sleep(2.0)
        else:
            # Any other exception is fatal for the pubsub thread: re-raise.
            self.log.warning('Worker %s: pubsub thread exiting on %s', self.name, exc)
            raise
def handle_payload(self, message):
"""Handle external commands"""
self.log.debug('Worker %s: received message: %s', self.name, message)
payload = parse_payload(message)
handle_command(self, payload)
    def subscribe(self):
        """Subscribe to this worker's channel.

        Starts a daemonized pubsub listener thread whose messages are routed
        to handle_payload; connection errors in that thread are retried via
        _pubsub_exception_handler.
        """
        self.log.info('Worker %s: subscribing to channel %s', self.name, self.pubsub_channel_name)
        self.pubsub = self.connection.pubsub()
        self.pubsub.subscribe(**{self.pubsub_channel_name: self.handle_payload})
        self.pubsub_thread = self.pubsub.run_in_thread(
            sleep_time=60, daemon=True, exception_handler=self._pubsub_exception_handler
        )
def get_heartbeat_ttl(self, job: 'Job') -> int:
"""Get's the TTL for the next heartbeat.
Args:
job (Job): The Job
Returns:
int: The heartbeat TTL.
"""
if job.timeout and job.timeout > 0:
remaining_execution_time = job.timeout - self.current_job_working_time
return int(min(remaining_execution_time, self.job_monitoring_interval)) + 60
else:
return self.job_monitoring_interval + 60
    def prepare_execution(self, job: 'Job') -> Execution:
        """This method is called by the main `Worker` (not the horse) as it prepares for execution.

        Do not confuse this with worker.prepare_job_execution() which is called by the horse.
        Creates the Execution record and flips the worker to BUSY atomically
        within one pipeline.
        """
        with self.connection.pipeline() as pipeline:
            heartbeat_ttl = self.get_heartbeat_ttl(job)
            self.execution = Execution.create(job, heartbeat_ttl, pipeline=pipeline)
            self.set_state(WorkerStatus.BUSY, pipeline=pipeline)
            pipeline.execute()
        return self.execution
def unsubscribe(self):
"""Unsubscribe from pubsub channel"""
if self.pubsub_thread:
self.log.info('Worker %s: unsubscribing from channel %s', self.name, self.pubsub_channel_name)
self.pubsub.unsubscribe()
self.pubsub_thread.stop()
self.pubsub_thread.join(timeout=1)
self.pubsub.close()
    def dequeue_job_and_maintain_ttl(
        self, timeout: Optional[int], max_idle_time: Optional[int] = None
    ) -> Optional[tuple['Job', 'Queue']]:
        """Dequeues a job while maintaining the TTL.

        Blocks (up to `timeout` seconds per attempt) until a job is available
        on one of the worker's queues, sending heartbeats and running
        maintenance tasks while waiting. Connection errors are retried with
        exponential backoff; exceeding `max_idle_time` ends the wait.

        Returns:
            result (Tuple[Job, Queue]): A tuple with the job and the queue,
            or None if the idle limit was reached without dequeueing a job.
        """
        result = None
        qnames = ','.join(self.queue_names())
        self.set_state(WorkerStatus.IDLE)
        self.procline('Listening on ' + qnames)
        self.log.debug('Worker %s: *** Listening on %s...', self.name, green(qnames))
        connection_wait_time = 1.0
        idle_since = now()
        idle_time_left = max_idle_time
        while True:
            try:
                self.heartbeat()
                if self.should_run_maintenance_tasks:
                    self.run_maintenance_tasks()
                # Never block longer than the remaining idle budget.
                if timeout is not None and idle_time_left is not None:
                    timeout = min(timeout, idle_time_left)
                self.log.debug(
                    'Worker %s: dequeueing jobs on queues %s and timeout %s', self.name, green(qnames), timeout
                )
                result = self.queue_class.dequeue_any(
                    self._ordered_queues,
                    timeout,
                    connection=self.connection,
                    job_class=self.job_class,
                    serializer=self.serializer,
                    death_penalty_class=self.death_penalty_class,
                )
                if result is not None:
                    job, queue = result
                    # Round-robin / ordering strategy bookkeeping.
                    self.reorder_queues(reference_queue=queue)
                    self.log.debug('Worker %s: dequeued job %s from %s', self.name, blue(job.id), green(queue.name))
                    job.redis_server_version = self.get_redis_server_version()
                    if self.log_job_description:
                        self.log.info('%s: %s (%s)', green(queue.name), blue(job.description), job.id)
                    else:
                        self.log.info('%s: %s', green(queue.name), job.id)
                    break
            except DequeueTimeout:
                # No job arrived within `timeout`; check the idle budget.
                if max_idle_time is not None:
                    idle_for = (now() - idle_since).total_seconds()
                    idle_time_left = math.ceil(max_idle_time - idle_for)
                    if idle_time_left <= 0:
                        break
            except redis.exceptions.ConnectionError as conn_err:
                # Retry with capped exponential backoff instead of dying.
                self.log.error(
                    'Worker %s: could not connect to Redis instance: %s retrying in %d seconds...',
                    self.name,
                    conn_err,
                    connection_wait_time,
                )
                time.sleep(connection_wait_time)
                connection_wait_time *= self.exponential_backoff_factor
                connection_wait_time = min(connection_wait_time, self.max_connection_wait_time)
        self.heartbeat()
        return result
    def heartbeat(self, timeout: Optional[int] = None, pipeline: Optional['Pipeline'] = None):
        """Specifies a new worker timeout, typically by extending the
        expiration time of the worker, effectively making this a "heartbeat"
        to not expire the worker until the timeout passes.

        The next heartbeat should come before this time, or the worker will
        die (at least from the monitoring dashboards).

        If no timeout is given, the worker_ttl will be used to update
        the expiration time of the worker.

        Args:
            timeout (Optional[int]): Timeout
            pipeline (Optional[Redis]): A Redis pipeline
        """
        # Default TTL includes a 60s grace period on top of worker_ttl.
        timeout = timeout or self.worker_ttl + 60
        connection: Union[Redis, Pipeline] = pipeline if pipeline is not None else self.connection
        connection.expire(self.key, timeout)
        connection.hset(self.key, 'last_heartbeat', utcformat(now()))
        self.log.debug(
            'Worker %s: sent heartbeat to prevent worker timeout. Next one should arrive in %s seconds.',
            self.name,
            timeout,
        )
    def maintain_heartbeats(self, job: 'Job'):
        """Updates worker, execution and job's last heartbeat fields."""
        with self.connection.pipeline() as pipeline:
            self.heartbeat(self.job_monitoring_interval + 60, pipeline=pipeline)
            ttl = int(self.get_heartbeat_ttl(job))
            # Also need to update execution's heartbeat
            self.execution.heartbeat(job.started_job_registry, ttl, pipeline=pipeline)  # type: ignore
            # After transition to job execution is complete, `job.heartbeat()` is no longer needed
            job.heartbeat(now(), ttl, pipeline=pipeline, xx=True)
            results = pipeline.execute()
            # If job was enqueued with `result_ttl=0` (job is deleted as soon as it finishes),
            # a race condition could happen where heartbeat arrives after job has been deleted,
            # leaving a job key that contains only `last_heartbeat` field.
            # job.heartbeat() uses hset() to update job's timestamp. This command returns 1 if a new
            # Redis key is created, 0 otherwise. So in this case we check the return of job's
            # heartbeat() command. If a new key was created, this means the job was already
            # deleted. In this case, we simply send another delete command to remove the key.
            # https://github.com/rq/rq/issues/1450
            # NOTE(review): index 7 is position-coupled to the exact number of
            # commands queued above — verify if commands are added/removed.
            if results[7] == 1:
                self.connection.delete(job.key)
def teardown(self):
if not self.is_horse:
if self.scheduler:
self.stop_scheduler()
self.register_death()
self.unsubscribe()
    def stop_scheduler(self):
        """Ensure scheduler process is stopped

        Will send the kill signal to scheduler process,
        if there's an OSError, just passes and `join()`'s the scheduler process,
        waiting for the process to finish.
        """
        if self.scheduler._process and self.scheduler._process.pid:
            try:
                os.kill(self.scheduler._process.pid, signal.SIGTERM)
            except OSError:
                # Process may already be gone; still join to reap it.
                pass
            self.scheduler._process.join()
def increment_failed_job_count(self, pipeline: Optional['Pipeline'] = None):
"""Used to keep the worker stats up to date in Redis.
Increments the failed job count.
Args:
pipeline (Optional[Pipeline], optional): A Redis Pipeline. Defaults to None.
"""
connection = pipeline if pipeline is not None else self.connection
connection.hincrby(self.key, 'failed_job_count', 1)
def increment_successful_job_count(self, pipeline: Optional['Pipeline'] = None):
"""Used to keep the worker stats up to date in Redis.
Increments the successful job count.
Args:
pipeline (Optional[Pipeline], optional): A Redis Pipeline. Defaults to None.
"""
connection = pipeline if pipeline is not None else self.connection
connection.hincrby(self.key, 'successful_job_count', 1)
def increment_total_working_time(self, job_execution_time: timedelta, pipeline: 'Pipeline'):
"""Used to keep the worker stats up to date in Redis.
Increments the time the worker has been working for (in seconds).
Args:
job_execution_time (timedelta): A timedelta object.
pipeline (Optional[Pipeline], optional): A Redis Pipeline. Defaults to None.
"""
pipeline.hincrbyfloat(self.key, 'total_working_time', job_execution_time.total_seconds())
    def handle_exception(self, job: 'Job', *exc_info):
        """Walks the exception handler stack to delegate exception handling.

        If the job cannot be deserialized, it will raise when func_name or
        the other properties are accessed, which will stop exceptions from
        being properly logged, so we guard against it here.
        """
        self.log.debug('Worker %s: handling exception for %s.', self.name, job.id)
        exc_string = ''.join(traceback.format_exception(*exc_info))
        try:
            extra = {'func': job.func_name, 'arguments': job.args, 'kwargs': job.kwargs}
            func_name = job.func_name
        except DeserializationError:
            # Job payload could not be deserialized; log with placeholders.
            extra = {}
            func_name = '<DeserializationError>'
        # the properties below should be safe however
        extra.update({'queue': job.origin, 'job_id': job.id})
        self.log.error(
            'Worker %s: job %s: exception raised while executing (%s)\n%s',
            self.name,
            job.id,
            func_name,
            exc_string,
            extra=extra,
        )
        for handler in self._exc_handlers:
            self.log.debug('Worker %s: invoking exception handler %s', self.name, handler)
            fallthrough = handler(job, *exc_info)
            # Only handlers with explicit return values should disable further
            # exc handling, so interpret a None return value as True.
            if fallthrough is None:
                fallthrough = True
            if not fallthrough:
                break
def push_exc_handler(self, handler_func):
"""Pushes an exception handler onto the exc handler stack."""
self._exc_handlers.append(handler_func)
def pop_exc_handler(self):
"""Pops the latest exception handler off of the exc handler stack."""
return self._exc_handlers.pop()
    @property
    def is_horse(self):
        """Returns whether or not this is the worker or the work horse."""
        # Flag flipped to True inside the forked child in main_work_horse().
        return self._is_horse
def handle_work_horse_killed(self, job, retpid, ret_val, rusage):
self.log.warning('Work horse killed for job %s: retpid=%s, ret_val=%s', job.id, retpid, ret_val)
if self._work_horse_killed_handler is None:
return
self._work_horse_killed_handler(job, retpid, ret_val, rusage)
    def prepare_job_execution(self, job: 'Job', remove_from_intermediate_queue: bool = False) -> None:
        """Performs misc bookkeeping like updating states prior to
        job execution.

        Runs in the work horse. Sets the current job id, resets the working
        time, sends a heartbeat sized for this job and, optionally, removes
        the job from the queue's intermediate list — all in one pipeline.
        """
        self.log.debug('Worker %s: preparing for execution of job ID %s', self.name, job.id)
        with self.connection.pipeline() as pipeline:
            self.set_current_job_id(job.id, pipeline=pipeline)
            self.set_current_job_working_time(0, pipeline=pipeline)
            heartbeat_ttl = self.get_heartbeat_ttl(job)
            self.heartbeat(heartbeat_ttl, pipeline=pipeline)
            job.heartbeat(now(), heartbeat_ttl, pipeline=pipeline)
            job.prepare_for_execution(self.name, pipeline=pipeline)
            if remove_from_intermediate_queue:
                # Local import avoids a circular dependency at module load.
                from .queue import Queue

                queue = Queue(job.origin, connection=self.connection)
                pipeline.lrem(queue.intermediate_queue_key, 1, job.id)
            pipeline.execute()
        self.log.debug('Worker %s: job preparation finished.', self.name)
        msg = 'Processing {0} from {1} since {2}'
        self.procline(msg.format(job.func_name, job.origin, time.time()))
    def handle_job_retry(self, job: 'Job', queue: 'Queue', retry: Retry, started_job_registry: StartedJobRegistry):
        """Handles the retry of certain job.

        It will remove the job from the `StartedJobRegistry` and requeue or reschedule the job.

        Args:
            job (Job): The job that will be retried.
            queue (Queue): The queue
            started_job_registry (StartedJobRegistry): The started registry
        """
        self.log.debug('Worker %s: handling retry of job %s', self.name, job.id)

        # Check if job has exceeded max retries
        if job.number_of_retries and job.number_of_retries >= retry.max:
            # If max retries exceeded, treat as failure
            self.log.warning('Worker %s: job %s has exceeded maximum retry attempts (%d)', self.name, job.id, retry.max)
            exc_string = f'Job failed after {retry.max} retry attempts'
            self.handle_job_failure(job, queue=queue, exc_string=exc_string)
            return

        # Calculate retry interval based on retry count
        retry_interval = Retry.get_interval(job.number_of_retries or 0, retry.intervals)
        with self.connection.pipeline() as pipeline:
            self.increment_failed_job_count(pipeline=pipeline)
            self.increment_total_working_time(job.ended_at - job.started_at, pipeline)  # type: ignore
            if retry_interval > 0:
                # Schedule job for later if there's an interval
                scheduled_time = now() + timedelta(seconds=retry_interval)
                job.set_status(JobStatus.SCHEDULED, pipeline=pipeline)
                queue.schedule_job(job, scheduled_time, pipeline=pipeline)
                self.log.debug(
                    'Worker %s: job %s: scheduled for retry at %s, %s attempts remaining',
                    self.name,
                    job.id,
                    scheduled_time,
                    retry.max - (job.number_of_retries or 0),
                )
            else:
                # No interval: the job is requeued immediately.
                self.log.debug(
                    'Worker %s: job %s: enqueued for retry, %s attempts remaining',
                    self.name,
                    job.id,
                    retry.max - (job.number_of_retries or 0),
                )
            # Increments the retry counter and requeues/saves as appropriate.
            job._handle_retry_result(queue=queue, pipeline=pipeline, worker_name=self.name)
            self.cleanup_execution(job, pipeline=pipeline)
            pipeline.execute()
        self.log.debug('Worker %s: finished handling retry of job %s', self.name, job.id)
    def handle_job_success(self, job: 'Job', queue: 'Queue', started_job_registry: StartedJobRegistry):
        """Handles the successful execution of certain job.

        It will remove the job from the `StartedJobRegistry`, adding it to the `SuccessfulJobRegistry`,
        and run a few maintenance tasks including:
            - Resting the current job ID
            - Enqueue dependents
            - Incrementing the job count and working time
            - Handling of the job successful execution
            - If job.repeats_left > 0, it will be scheduled for the next execution.

        Runs within a loop with the `watch` method so that protects interactions
        with dependents keys.

        Args:
            job (Job): The job that was successful.
            queue (Queue): The queue
            started_job_registry (StartedJobRegistry): The started registry
        """
        self.log.debug('Worker %s: handling successful execution of job %s', self.name, job.id)
        with self.connection.pipeline() as pipeline:
            while True:
                try:
                    # if dependencies are inserted after enqueue_dependents
                    # a WatchError is thrown by execute()
                    pipeline.watch(job.dependents_key)
                    # enqueue_dependents might call multi() on the pipeline
                    self.log.debug('Worker %s: enqueueing dependents of job %s', self.name, job.id)
                    queue.enqueue_dependents(job, pipeline=pipeline)
                    if not pipeline.explicit_transaction:
                        # enqueue_dependents didn't call multi after all!
                        # We have to do it ourselves to make sure everything runs in a transaction
                        self.log.debug('Worker %s: calling multi() on pipeline for job %s', self.name, job.id)
                        pipeline.multi()
                    self.increment_successful_job_count(pipeline=pipeline)
                    self.increment_total_working_time(job.ended_at - job.started_at, pipeline)  # type: ignore
                    result_ttl = job.get_result_ttl(self.default_result_ttl)
                    if result_ttl != 0:
                        # result_ttl == 0 means discard immediately; skip persisting.
                        self.log.debug("Worker %s: saving job %s's successful execution result", self.name, job.id)
                        job._handle_success(result_ttl, pipeline=pipeline, worker_name=self.name)
                    if job.repeats_left is not None and job.repeats_left > 0:
                        # Local import avoids a circular dependency at module load.
                        from .repeat import Repeat

                        self.log.info(
                            'Worker %s: job %s scheduled to repeat (%s left)', self.name, job.id, job.repeats_left
                        )
                        Repeat.schedule(job, queue, pipeline=pipeline)
                    else:
                        job.cleanup(result_ttl, pipeline=pipeline, remove_from_queue=False)
                    self.log.debug('Cleaning up execution of job %s', job.id)
                    self.cleanup_execution(job, pipeline=pipeline)
                    pipeline.execute()
                    assert job.started_at
                    assert job.ended_at
                    time_taken = job.ended_at - job.started_at
                    if self.log_job_description:
                        self.log.info(
                            'Successfully completed %s job in %ss on worker %s', job.description, time_taken, self.name
                        )
                    else:
                        self.log.info(
                            'Successfully completed job %s in %ss on worker %s', job.id, time_taken, self.name
                        )
                    self.log.debug('Worker %s: finished handling successful execution of job %s', self.name, job.id)
                    break
                except redis.exceptions.WatchError:
                    # A dependent was added concurrently; retry the whole transaction.
                    continue
    def handle_execution_ended(self, job: 'Job', queue: 'Queue', heartbeat_ttl: int):
        """Called after job has finished execution.

        Stamps the job's end time and sends a final job heartbeat so the key
        survives long enough for callbacks/result handling.
        """
        job.ended_at = now()
        job.heartbeat(now(), heartbeat_ttl)
    def perform_job(self, job: 'Job', queue: 'Queue') -> bool:
        """Performs the actual work of a job. Will/should only be called
        inside the work horse's process.

        Args:
            job (Job): The Job
            queue (Queue): The Queue

        Returns:
            bool: True after finished.
        """
        started_job_registry = queue.started_job_registry
        self.log.debug('Worker %s: started job registry set.', self.name)
        try:
            # Only safe to remove from the intermediate queue when this
            # worker listens on exactly one queue.
            remove_from_intermediate_queue = len(self.queues) == 1
            self.prepare_job_execution(job, remove_from_intermediate_queue)
            job.started_at = now()
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            # death_penalty_class aborts the job with JobTimeoutException
            # once `timeout` elapses.
            with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id):
                self.log.debug('Worker %s: performing job %s ...', self.name, job.id)
                return_value = job.perform()
                self.log.debug('Worker %s: finished performing job %s', self.name, job.id)
            self.handle_execution_ended(job, queue, job.success_callback_timeout)
            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = return_value
            if isinstance(return_value, Retry):
                # Retry the job
                self.log.debug('Worker %s: job %s returns a Retry object', self.name, job.id)
                self.handle_job_retry(
                    job=job, queue=queue, retry=return_value, started_job_registry=started_job_registry
                )
                return True
            else:
                job.execute_success_callback(self.death_penalty_class, return_value)
                self.handle_job_success(job=job, queue=queue, started_job_registry=started_job_registry)
        except:  # NOQA
            self.log.debug('Worker %s: job %s raised an exception.', self.name, job.id)
            job._status = JobStatus.FAILED
            self.handle_execution_ended(job, queue, job.failure_callback_timeout)
            exc_info = sys.exc_info()
            exc_string = ''.join(traceback.format_exception(*exc_info))
            try:
                job.execute_failure_callback(self.death_penalty_class, *exc_info)
            except:  # noqa
                # The failure callback itself failed; report that exception instead.
                exc_info = sys.exc_info()
                exc_string = ''.join(traceback.format_exception(*exc_info))
            # TODO: reversing the order of handle_job_failure() and handle_exception()
            # causes Sentry test to fail
            self.handle_exception(job, *exc_info)
            self.handle_job_failure(
                job=job, exc_string=exc_string, queue=queue, started_job_registry=started_job_registry
            )
            return False
        # Success path: log the outcome and the result's lifespan.
        self.log.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id)
        if return_value is not None:
            self.log.debug('Worker %s: result: %r', self.name, yellow(str(return_value)))
        if self.log_result_lifespan:
            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl == 0:
                self.log.info('Result discarded immediately')
            elif result_ttl > 0:
                self.log.info('Result is kept for %s seconds', result_ttl)
            else:
                self.log.info('Result will never expire, clean up result key manually')
        return True
    def main_work_horse(self, job: 'Job', queue: 'Queue'):
        """This is the entry point of the newly spawned work horse.

        After fork()'ing, always assure we are generating random sequences
        that are different from the worker.

        os._exit() is the way to exit from childs after a fork(), in
        contrast to the regular sys.exit()
        """
        random.seed()
        self.setup_work_horse_signals()
        self._is_horse = True
        self.log = logger
        try:
            self.perform_job(job, queue)
        except:  # noqa
            # Any uncaught error maps to a non-zero horse exit status.
            os._exit(1)
        os._exit(0)
def setup_work_horse_signals(self):
"""Setup signal handing for the newly spawned work horse
Always ignore Ctrl+C in the work horse, as it might abort the
currently running job.
The main worker catches the Ctrl+C and requests graceful shutdown
after the current work is done. When cold shutdown is requested, it
kills the current job anyway.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
    def kill_horse(self, sig: signal.Signals = SHUTDOWN_SIGNAL):
        """Kill the work horse process. No-op for workers without child processes."""
        # Subclasses that fork a horse override this with a real os.kill().
        pass
def wait_for_horse(self) -> tuple[Optional[int], Optional[int], Optional['struct_rusage']]:
"""Wait for the work horse process to complete. No-op for workers without child processes."""
return None, None, None
def __eq__(self, other):
"""Equality does not take the database/connection into account"""
if not isinstance(other, self.__class__):
raise TypeError('Cannot compare workers to other types (of workers)')
return self.name == other.name
def __hash__(self):
"""The hash does not take the database/connection into account"""
return hash(self.name)
| BaseWorker |
python | Pylons__pyramid | src/pyramid/security.py | {
"start": 6344,
"end": 6694
} | class ____(PermitsResult):
"""
An instance of ``Denied`` is returned when a security-related
API or other :app:`Pyramid` code denies an action unrelated to
an ACL check. It evaluates equal to all boolean false types. It
has an attribute named ``msg`` describing the circumstances for
the deny.
"""
boolval = 0
| Denied |
python | celery__celery | t/unit/worker/test_consumer.py | {
"start": 32072,
"end": 33174
} | class ____:
def test_start(self):
c = Mock()
c.timer = Mock()
c.event_dispatcher = Mock()
with patch('celery.worker.heartbeat.Heart') as hcls:
h = Heart(c)
assert h.enabled
assert h.heartbeat_interval is None
assert c.heart is None
h.start(c)
assert c.heart
hcls.assert_called_with(c.timer, c.event_dispatcher,
h.heartbeat_interval)
c.heart.start.assert_called_with()
def test_start_heartbeat_interval(self):
c = Mock()
c.timer = Mock()
c.event_dispatcher = Mock()
with patch('celery.worker.heartbeat.Heart') as hcls:
h = Heart(c, False, 20)
assert h.enabled
assert h.heartbeat_interval == 20
assert c.heart is None
h.start(c)
assert c.heart
hcls.assert_called_with(c.timer, c.event_dispatcher,
h.heartbeat_interval)
c.heart.start.assert_called_with()
| test_Heart |
python | PyCQA__pylint | tests/functional/r/regression/regression_properties_in_class_context.py | {
"start": 145,
"end": 291
} | class ____(metaclass=Meta):
pass
assert 'foo' in Parent.values # no warning
for value in Parent.values: # no warning
print(value)
| Parent |
python | pytorch__pytorch | test/inductor/test_compiled_autograd.py | {
"start": 135993,
"end": 143553
} | class ____(torch.nn.Module):
def forward(self, inputs, sizes, scalars, hooks, packed_data):
getitem = inputs[0]
getitem_1 = inputs[1]; inputs = None
getitem_2 = sizes[0]
getitem_3 = sizes[1]
getitem_4 = sizes[2]
getitem_5 = sizes[3]
getitem_6 = sizes[4]
getitem_7 = sizes[5]
getitem_8 = sizes[6]
getitem_9 = sizes[7]
getitem_10 = sizes[8]
getitem_11 = sizes[9]
getitem_12 = sizes[10]
getitem_13 = sizes[11]; sizes = None
unwrap_maybe_dynamic_int = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_2); getitem_2 = None
unwrap_maybe_dynamic_int_1 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_3); getitem_3 = None
unwrap_maybe_dynamic_int_2 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_4); getitem_4 = None
unwrap_maybe_dynamic_int_3 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_5); getitem_5 = None
unwrap_maybe_dynamic_int_4 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_6); getitem_6 = None
unwrap_maybe_dynamic_int_5 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_7); getitem_7 = None
unwrap_maybe_dynamic_int_6 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_8); getitem_8 = None
unwrap_maybe_dynamic_int_7 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_9); getitem_9 = None
unwrap_maybe_dynamic_int_8 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_10); getitem_10 = None
unwrap_maybe_dynamic_int_9 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_11); getitem_11 = None
unwrap_maybe_dynamic_int_10 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_12); getitem_12 = None
unwrap_maybe_dynamic_int_11 = torch__dynamo_external_utils_unwrap_maybe_dynamic_int(getitem_13); getitem_13 = None
validate_outputs = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem], [((None, None, device(type='cpu'), 6, 0, None), [], False, 6)]); getitem = None
getitem_14 = validate_outputs[0]; validate_outputs = None
sum_backward0 = torch__dynamo_compiled_autograd_ops_SumBackward0([getitem_14], [True], [unwrap_maybe_dynamic_int, unwrap_maybe_dynamic_int_1]); getitem_14 = unwrap_maybe_dynamic_int = unwrap_maybe_dynamic_int_1 = None
getitem_15 = sum_backward0[0]; sum_backward0 = None
validate_outputs_1 = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem_15], [((None, None, device(type='cpu'), 6, 0, None), [unwrap_maybe_dynamic_int_2, unwrap_maybe_dynamic_int_3], False, 6)]); getitem_15 = unwrap_maybe_dynamic_int_2 = unwrap_maybe_dynamic_int_3 = None
getitem_16 = validate_outputs_1[0]; validate_outputs_1 = None
getitem_17 = hooks[0]
getitem_18 = packed_data[0]
getitem_19 = hooks[1]
getitem_20 = packed_data[1]
call_hook = torch__dynamo_external_utils_call_hook(getitem_17, getitem_18, hook_type = 'unpack_hook'); getitem_17 = getitem_18 = None
call_hook_1 = torch__dynamo_external_utils_call_hook(getitem_19, getitem_20, hook_type = 'unpack_hook'); getitem_19 = getitem_20 = None
mul_backward0 = torch__dynamo_compiled_autograd_ops_MulBackward0([getitem_16], [True, True], call_hook, 6, call_hook_1, 6); getitem_16 = call_hook = call_hook_1 = None
getitem_21 = mul_backward0[0]
getitem_22 = mul_backward0[1]; mul_backward0 = None
validate_outputs_2 = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem_21, getitem_22], [((None, None, device(type='cpu'), 6, 0, None), [unwrap_maybe_dynamic_int_4, unwrap_maybe_dynamic_int_5], False, 6), ((None, None, device(type='cpu'), 6, 0, None), [unwrap_maybe_dynamic_int_6, unwrap_maybe_dynamic_int_7], False, 6)]); getitem_21 = getitem_22 = unwrap_maybe_dynamic_int_4 = unwrap_maybe_dynamic_int_5 = unwrap_maybe_dynamic_int_6 = unwrap_maybe_dynamic_int_7 = None
getitem_23 = validate_outputs_2[0]
getitem_24 = validate_outputs_2[1]; validate_outputs_2 = None
getitem_25 = hooks[2]
getitem_26 = packed_data[2]
call_hook_2 = torch__dynamo_external_utils_call_hook(getitem_25, getitem_26, hook_type = 'unpack_hook'); getitem_25 = getitem_26 = None
cos_backward0 = torch__dynamo_compiled_autograd_ops_CosBackward0([getitem_24], [True], call_hook_2); getitem_24 = call_hook_2 = None
getitem_27 = cos_backward0[0]; cos_backward0 = None
validate_outputs_3 = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem_27], [((None, None, device(type='cpu'), 6, 0, None), [unwrap_maybe_dynamic_int_8, unwrap_maybe_dynamic_int_9], False, 6)]); getitem_27 = unwrap_maybe_dynamic_int_8 = unwrap_maybe_dynamic_int_9 = None
getitem_28 = validate_outputs_3[0]; validate_outputs_3 = None
add = torch.add(getitem_23, getitem_28); getitem_23 = getitem_28 = None
getitem_29 = hooks[3]; hooks = None
getitem_30 = packed_data[3]; packed_data = None
call_hook_3 = torch__dynamo_external_utils_call_hook(getitem_29, getitem_30, hook_type = 'unpack_hook'); getitem_29 = getitem_30 = None
sin_backward0 = torch__dynamo_compiled_autograd_ops_SinBackward0([add], [True], call_hook_3); add = call_hook_3 = None
getitem_31 = sin_backward0[0]; sin_backward0 = None
validate_outputs_4 = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem_31], [((None, None, device(type='cpu'), 6, 0, None), [unwrap_maybe_dynamic_int_10, unwrap_maybe_dynamic_int_11], False, 6)]); getitem_31 = unwrap_maybe_dynamic_int_10 = unwrap_maybe_dynamic_int_11 = None
getitem_32 = validate_outputs_4[0]; validate_outputs_4 = None
call_accumulate_grad = torch__dynamo_external_utils_call_accumulate_grad(getitem_1, getitem_32, False); getitem_1 = getitem_32 = call_accumulate_grad = None
_exec_final_callbacks_stub = torch__dynamo_external_utils__exec_final_callbacks_stub(); _exec_final_callbacks_stub = None
return []
""", # noqa: B950
)
self.check_output_and_recompiles(
fn,
count=[1, 0],
compiler_fn=make_compiler_fn(backend="ca_eager", gm_hook=check),
)
@requires_cuda_and_triton
def test_cpu_offloading(self):
def fn():
def pack(x):
return x.cpu()
def unpack(x):
return x.cuda()
class MyMatMul(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return torch.matmul(x, x)
@staticmethod
def backward(ctx, grad_out):
(x,) = ctx.saved_tensors
return grad_out * x
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
for i in [10, 100, 10, 20, 30]:
x = torch.randn(i, requires_grad=True).cuda()
MyMatMul.apply(x).sum().backward()
yield x.grad
i = 0
def check(gm):
nonlocal i
if i == 0:
i += 1
return
graph_code = normalize_gm(gm.print_readable(print_output=False))
self.assertExpectedInline(
graph_code,
"""\
| CompiledAutograd0 |
python | tornadoweb__tornado | demos/google_auth/main.py | {
"start": 1208,
"end": 1977
} | class ____(BaseHandler, tornado.auth.GoogleOAuth2Mixin):
@tornado.web.authenticated
async def get(self):
try:
# This is redundant: we got the userinfo in the login handler.
# But this demonstrates the usage of oauth2_request outside of
# the login flow, and getting anything more than userinfo
# leads to more approval prompts and complexity.
user_info = await self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=self.current_user["access_token"],
)
except tornado.httpclient.HTTPClientError as e:
print(e.response.body)
raise
self.write(f"Hello {user_info['name']}")
| IndexHandler |
python | doocs__leetcode | solution/1100-1199/1188.Design Bounded Blocking Queue/Solution.py | {
"start": 34,
"end": 520
} | class ____(object):
def __init__(self, capacity: int):
self.s1 = Semaphore(capacity)
self.s2 = Semaphore(0)
self.q = deque()
def enqueue(self, element: int) -> None:
self.s1.acquire()
self.q.append(element)
self.s2.release()
def dequeue(self) -> int:
self.s2.acquire()
ans = self.q.popleft()
self.s1.release()
return ans
def size(self) -> int:
return len(self.q)
| BoundedBlockingQueue |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/api.py | {
"start": 2753,
"end": 2946
} | class ____:
Edit = 'edit'
Inspect = 'inspect'
Array = 'array'
Export = 'export'
Clear = 'clear'
Image = 'image'
SVG = 'svg'
Quit = 'exit'
| ClientContextMenuSections |
python | pytorch__pytorch | test/quantization/core/experimental/quantization_util.py | {
"start": 614,
"end": 5043
} | class ____:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0.0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def evaluate(model, criterion, data_loader):
model.eval()
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
with torch.no_grad():
for image, target in data_loader:
output = model(image)
loss = criterion(output, target) # noqa: F841
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], image.size(0))
top5.update(acc5[0], image.size(0))
print()
return top1, top5
def load_model(model_file):
model = resnet18(pretrained=False)
state_dict = torch.load(model_file)
model.load_state_dict(state_dict)
model.to("cpu")
return model
def print_size_of_model(model):
if isinstance(model, torch.jit.RecursiveScriptModule):
torch.jit.save(model, "temp.p")
else:
torch.jit.save(torch.jit.script(model), "temp.p")
print("Size (MB):", os.path.getsize("temp.p") / 1e6)
os.remove("temp.p")
def prepare_data_loaders(data_path):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
dataset = torchvision.datasets.ImageNet(data_path,
split="train",
transform=transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize]))
dataset_test = torchvision.datasets.ImageNet(data_path,
split="val",
transform=transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize]))
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=train_batch_size,
sampler=train_sampler)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=eval_batch_size,
sampler=test_sampler)
return data_loader, data_loader_test
def training_loop(model, criterion, data_loader):
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
train_loss, correct, total = 0, 0, 0
model.train()
for _ in range(10):
for data, target in data_loader:
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss = Variable(loss, requires_grad=True)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return train_loss, correct, total
| AverageMeter |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 38744,
"end": 48619
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[8]", L_y_: "f32[8]"):
l_x_ = L_x_
l_y_ = L_y_
subgraph_0 = self.subgraph_0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = None
getitem: "f32[8]" = invoke_subgraph[0]; invoke_subgraph = None
subgraph_1 = self.subgraph_0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_0', getitem, l_y_); subgraph_1 = getitem = None
getitem_1: "f32[8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
subgraph_2 = self.subgraph_0
invoke_subgraph_2 = torch.ops.higher_order.invoke_subgraph(subgraph_2, 'subgraph_0', getitem_1, l_y_); subgraph_2 = getitem_1 = None
getitem_2: "f32[8]" = invoke_subgraph_2[0]; invoke_subgraph_2 = None
subgraph_3 = self.subgraph_0
invoke_subgraph_3 = torch.ops.higher_order.invoke_subgraph(subgraph_3, 'subgraph_0', getitem_2, l_y_); subgraph_3 = getitem_2 = None
getitem_3: "f32[8]" = invoke_subgraph_3[0]; invoke_subgraph_3 = None
subgraph_4 = self.subgraph_0
invoke_subgraph_4 = torch.ops.higher_order.invoke_subgraph(subgraph_4, 'subgraph_0', getitem_3, l_y_); subgraph_4 = getitem_3 = l_y_ = None
getitem_4: "f32[8]" = invoke_subgraph_4[0]; invoke_subgraph_4 = None
return (getitem_4,)
class subgraph_0(torch.nn.Module):
def forward(self, l_x_: "f32[8]", l_y_: "f32[8]"):
x: "f32[8]" = l_x_ * l_y_; l_x_ = None
x_1: "f32[8]" = x * l_y_; x = None
x_2: "f32[8]" = x_1 * l_y_; x_1 = None
x_3: "f32[8]" = x_2 * l_y_; x_2 = None
x_4: "f32[8]" = x_3 * l_y_; x_3 = l_y_ = None
return (x_4,)
""",
)
def test_input_mutation(self):
@nested_compile_region
def gn(x, y):
x.add_(1)
return torch.mul(x, y)
def fn(x, y):
return gn(x, y)
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
x_clone = x.clone()
self.assertEqual(opt_fn(x, y), fn(x_clone, y))
def test_input_mutation_mutiple_times(self):
@nested_compile_region
def gn(x, y):
x.add_(1)
return torch.mul(x, y)
def fn(x, y):
z = gn(x, y)
for _ in range(16):
z += gn(x, y)
return z
x = torch.randn(8, requires_grad=False)
x_clone = x.clone()
y = torch.randn(8, requires_grad=False)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
with (
torch.no_grad(),
):
out = opt_fn(x, y)
exp_out = fn(x_clone, y)
self.assertEqual(exp_out, out)
self.assertEqual(x_clone, x)
def test_input_mutation_mutiple_times_fake_tensor_cahche_hit(self):
@nested_compile_region
def gn(x, y):
x.add_(1)
return torch.mul(x, y)
def fn(x, y):
z = gn(x, y)
for _ in range(16):
z += gn(x, y)
return z
x = torch.randn(8, requires_grad=False)
x_clone = x.clone()
y = torch.randn(8, requires_grad=False)
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
fake_prop_count = 0
def _mock_invoke_subgraph(mode, subgraph, identifier, *operands):
nonlocal fake_prop_count
fake_prop_count += 1
return (operands[0].clone(),)
with (
mock.patch(
"torch._higher_order_ops.utils.registered_hop_fake_fns",
{torch.ops.higher_order.invoke_subgraph: _mock_invoke_subgraph},
),
torch.no_grad(),
):
out = opt_fn(x, y)
# Fake propagation occurs only twice, with subsequent calls using cached results.
#
# First fake propagation (in collect_metadata_analysis of AOT):
# - Uses the original Dynamo graph
# - Flow: functionalization -> fake tensor
#
# Second fake propagation (in _create_graph of AOT):
# - Uses a materialized graph that includes epilogue operations
# - Flow: functionalization -> proxy -> fake tensor
#
# The key difference: the second time we materialize the graph with epilogue
# operations included in the proxy key. Since the dynamo graph module is not
# in the functional + epilogue format, the cache key should be different,
# preventing cache reuse between these two phases.
self.assertEqual(fake_prop_count, 2)
exp_out = fn(x_clone, y)
self.assertEqual(exp_out, out)
self.assertEqual(x_clone, x)
def test_input_mutation_inference_mode(self):
@nested_compile_region
def gn(x, y):
x.add_(1)
return torch.mul(x, y)
def fn(x, y):
z = torch.cos(x)
with torch.inference_mode():
return gn(torch.cos(z), y)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
with self.assertRaisesRegex(
RuntimeError,
"Inplace update to inference tensor outside InferenceMode is not allowed",
):
opt_fn(x, y)
def test_simple_module(self):
mod = torch.nn.Linear(8, 8)
@nested_compile_region
def gn(x):
return torch.cos(x), mod(x)
def fn(x):
out = gn(x)
return out[0] + out[1]
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
# requires_grad is False deliberately to force None the joint_graph
# outputs
x = torch.randn(8, 8, requires_grad=False)
x_clone = x.detach().clone().requires_grad_(False)
ref = fn(x)
res = opt_fn(x_clone)
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
def test_fail_with_direct_invoke_subgraph(self):
from torch._higher_order_ops import invoke_subgraph
def gn(x):
return torch.sin(x)
def fn(x):
return invoke_subgraph(gn, None, (x,))
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(8, 8, requires_grad=True)
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported, "Directly using invoke_subgraph is not"
):
opt_fn(x)
def test_input_output_aliasing(self):
@nested_compile_region
def gn(x, y):
return (x, torch.mul(x, y))
def fn(x, y):
outs = gn(x, y)
return outs[0] * outs[1]
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"Encountered aliasing during higher order op tracing",
):
opt_fn(x, y)
def test_input_input_aliasing(self):
@nested_compile_region
def gn(x, y):
return torch.mul(x, y)
def fn(x):
return gn(x, x.view(1, 8))
x = torch.randn(8, requires_grad=False)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"Encountered aliasing during higher order op tracing",
):
opt_fn(x)
def test_output_output_aliasing(self):
@nested_compile_region
def gn(x):
z = torch.cos(x)
return z, z.view(1, 8)
def fn(x):
return gn(x)
x = torch.randn(8, requires_grad=False)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"Encountered aliasing during higher order op tracing",
):
opt_fn(x)
def test_mod_attr_aliasing(self):
class MutateParam(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.ones(8)
def forward(self, x):
self.a.add_(1)
return torch.mul(x, self.a)
@nested_compile_region
def gn(x):
return mod(x)
def fn(x, y):
return gn(x) * y
mod = MutateParam()
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
compiled_out = opt_fn(x, y)
# reset constant attr
mod.a = torch.ones(8)
self.assertEqual(compiled_out, fn(x, y))
def test_redundant_compile_region(self):
@nested_compile_region
@nested_compile_region
def gn(x):
return torch.sin(x)
def fn(x):
return gn(x) + gn(x)
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(8, 8, requires_grad=True)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | plotly__plotly.py | plotly/graph_objs/layout/slider/_step.py | {
"start": 235,
"end": 12345
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.slider"
_path_str = "layout.slider.step"
_valid_props = {
"args",
"execute",
"label",
"method",
"name",
"templateitemname",
"value",
"visible",
}
@property
def args(self):
"""
Sets the arguments values to be passed to the Plotly method set
in `method` on slide.
The 'args' property is an info array that may be specified as:
* a list or tuple of up to 3 elements where:
(0) The 'args[0]' property accepts values of any type
(1) The 'args[1]' property accepts values of any type
(2) The 'args[2]' property accepts values of any type
Returns
-------
list
"""
return self["args"]
@args.setter
def args(self, val):
self["args"] = val
@property
def execute(self):
"""
When true, the API method is executed. When false, all other
behaviors are the same and command execution is skipped. This
may be useful when hooking into, for example, the
`plotly_sliderchange` method and executing the API command
manually without losing the benefit of the slider automatically
binding to the state of the plot through the specification of
`method` and `args`.
The 'execute' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["execute"]
@execute.setter
def execute(self, val):
self["execute"] = val
@property
def label(self):
"""
Sets the text label to appear on the slider
The 'label' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def method(self):
"""
Sets the Plotly method to be called when the slider value is
changed. If the `skip` method is used, the API slider will
function as normal but will perform no API calls and will not
bind automatically to state updates. This may be used to create
a component interface and attach to slider events manually via
JavaScript.
The 'method' property is an enumeration that may be specified as:
- One of the following enumeration values:
['restyle', 'relayout', 'animate', 'update', 'skip']
Returns
-------
Any
"""
return self["method"]
@method.setter
def method(self, val):
self["method"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
Sets the value of the slider step, used to refer to the step
programatically. Defaults to the slider label if not provided.
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def visible(self):
"""
Determines whether or not this step is included in the slider.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
args
Sets the arguments values to be passed to the Plotly
method set in `method` on slide.
execute
When true, the API method is executed. When false, all
other behaviors are the same and command execution is
skipped. This may be useful when hooking into, for
example, the `plotly_sliderchange` method and executing
the API command manually without losing the benefit of
the slider automatically binding to the state of the
plot through the specification of `method` and `args`.
label
Sets the text label to appear on the slider
method
Sets the Plotly method to be called when the slider
value is changed. If the `skip` method is used, the API
slider will function as normal but will perform no API
calls and will not bind automatically to state updates.
This may be used to create a component interface and
attach to slider events manually via JavaScript.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
Sets the value of the slider step, used to refer to the
step programatically. Defaults to the slider label if
not provided.
visible
Determines whether or not this step is included in the
slider.
"""
def __init__(
self,
arg=None,
args=None,
execute=None,
label=None,
method=None,
name=None,
templateitemname=None,
value=None,
visible=None,
**kwargs,
):
"""
Construct a new Step object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.slider.Step`
args
Sets the arguments values to be passed to the Plotly
method set in `method` on slide.
execute
When true, the API method is executed. When false, all
other behaviors are the same and command execution is
skipped. This may be useful when hooking into, for
example, the `plotly_sliderchange` method and executing
the API command manually without losing the benefit of
the slider automatically binding to the state of the
plot through the specification of `method` and `args`.
label
Sets the text label to appear on the slider
method
Sets the Plotly method to be called when the slider
value is changed. If the `skip` method is used, the API
slider will function as normal but will perform no API
calls and will not bind automatically to state updates.
This may be used to create a component interface and
attach to slider events manually via JavaScript.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
Sets the value of the slider step, used to refer to the
step programatically. Defaults to the slider label if
not provided.
visible
Determines whether or not this step is included in the
slider.
Returns
-------
Step
"""
super().__init__("steps")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.slider.Step
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Step`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("args", arg, args)
self._set_property("execute", arg, execute)
self._set_property("label", arg, label)
self._set_property("method", arg, method)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Step |
python | spack__spack | lib/spack/spack/fetch_strategy.py | {
"start": 21254,
"end": 22227
} | class ____(URLFetchStrategy):
"""The resource associated with a cache URL may be out of date."""
@_needs_stage
def fetch(self):
path = url_util.file_url_string_to_path(self.url)
# check whether the cache file exists.
if not os.path.isfile(path):
raise NoCacheError(f"No cache of {path}")
# remove old symlink if one is there.
filename = self.stage.save_filename
if os.path.lexists(filename):
os.remove(filename)
# Symlink to local cached archive.
symlink(path, filename)
# Remove link if checksum fails, or subsequent fetchers will assume they don't need to
# download.
if self.digest:
try:
self.check()
except ChecksumError:
os.remove(self.archive_file)
raise
# Notify the user how we fetched.
tty.msg(f"Using cached archive: {path}")
| CacheURLFetchStrategy |
python | pytorch__pytorch | test/cpp/aoti_inference/compile_model.py | {
"start": 353,
"end": 671
} | class ____(torch.nn.Module):
"""
a simple module to be compiled
"""
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(4, 6)
self.relu = torch.nn.ReLU()
def forward(self, x):
a = self.fc(x)
b = self.relu(a)
return b
| SimpleModule |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_validation.py | {
"start": 2495,
"end": 3248
} | class ____(BaseActionValidatorHandler):
provider = Action.Type.MSTEAMS
notify_action_form = MsTeamsNotifyServiceForm
def generate_action_form_data(self) -> dict[str, Any]:
return {
"team": self.validated_data["integration_id"],
"channel": self.validated_data["config"]["target_display"],
}
def update_action_data(self, cleaned_data: dict[str, Any]) -> dict[str, Any]:
self.validated_data["config"].update(
{
"target_display": cleaned_data["channel"],
"target_identifier": cleaned_data["channel_id"],
}
)
return self.validated_data
@action_validator_registry.register(Action.Type.DISCORD)
| MSTeamsActionValidatorHandler |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 15568,
"end": 20126
} | class ____:
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.eview = nx.reportviews.EdgeView
def test_pickle(self):
import pickle
ev = self.eview(self.G)
pev = pickle.loads(pickle.dumps(ev, -1))
assert ev == pev
assert ev.__slots__ == pev.__slots__
def modify_edge(self, G, e, **kwds):
G._adj[e[0]][e[1]].update(kwds)
def test_str(self):
ev = self.eview(self.G)
rep = str([(n, n + 1) for n in range(8)])
assert str(ev) == rep
def test_repr(self):
ev = self.eview(self.G)
rep = (
"EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
+ "(4, 5), (5, 6), (6, 7), (7, 8)])"
)
assert repr(ev) == rep
def test_getitem(self):
G = self.G.copy()
ev = G.edges
G.edges[0, 1]["foo"] = "bar"
assert ev[0, 1] == {"foo": "bar"}
# slicing
with pytest.raises(nx.NetworkXError, match=".*does not support slicing"):
G.edges[0:5]
# Invalid edge
with pytest.raises(KeyError, match=r".*edge.*is not in the graph."):
G.edges[0, 9]
def test_call(self):
ev = self.eview(self.G)
assert id(ev) == id(ev())
assert id(ev) == id(ev(data=False))
assert id(ev) != id(ev(data=True))
assert id(ev) != id(ev(nbunch=1))
def test_data(self):
ev = self.eview(self.G)
assert id(ev) != id(ev.data())
assert id(ev) == id(ev.data(data=False))
assert id(ev) != id(ev.data(data=True))
assert id(ev) != id(ev.data(nbunch=1))
def test_iter(self):
ev = self.eview(self.G)
for u, v in ev:
pass
iev = iter(ev)
assert next(iev) == (0, 1)
assert iter(ev) != ev
assert iter(iev) == iev
def test_contains(self):
ev = self.eview(self.G)
edv = ev()
if self.G.is_directed():
assert (1, 2) in ev and (2, 1) not in ev
assert (1, 2) in edv and (2, 1) not in edv
else:
assert (1, 2) in ev and (2, 1) in ev
assert (1, 2) in edv and (2, 1) in edv
assert (1, 4) not in ev
assert (1, 4) not in edv
# edge not in graph
assert (1, 90) not in ev
assert (90, 1) not in ev
assert (1, 90) not in edv
assert (90, 1) not in edv
def test_contains_with_nbunch(self):
ev = self.eview(self.G)
evn = ev(nbunch=[0, 2])
assert (0, 1) in evn
assert (1, 2) in evn
assert (2, 3) in evn
assert (3, 4) not in evn
assert (4, 5) not in evn
assert (5, 6) not in evn
assert (7, 8) not in evn
assert (8, 9) not in evn
def test_len(self):
ev = self.eview(self.G)
num_ed = 9 if self.G.is_multigraph() else 8
assert len(ev) == num_ed
H = self.G.copy()
H.add_edge(1, 1)
assert len(H.edges(1)) == 3 + H.is_multigraph() - H.is_directed()
assert len(H.edges()) == num_ed + 1
assert len(H.edges) == num_ed + 1
def test_and(self):
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
if self.G.is_directed():
assert some_edges & ev, {(0, 1)}
assert ev & some_edges, {(0, 1)}
else:
assert ev & some_edges == {(0, 1), (1, 0)}
assert some_edges & ev == {(0, 1), (1, 0)}
return
def test_or(self):
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
result1 = {(n, n + 1) for n in range(8)}
result1.update(some_edges)
result2 = {(n + 1, n) for n in range(8)}
result2.update(some_edges)
assert (ev | some_edges) in (result1, result2)
assert (some_edges | ev) in (result1, result2)
def test_xor(self):
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
if self.G.is_directed():
result = {(n, n + 1) for n in range(1, 8)}
result.update({(1, 0), (0, 2)})
assert ev ^ some_edges == result
else:
result = {(n, n + 1) for n in range(1, 8)}
result.update({(0, 2)})
assert ev ^ some_edges == result
return
def test_sub(self):
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
result = {(n, n + 1) for n in range(8)}
result.remove((0, 1))
assert ev - some_edges, result
| TestEdgeView |
python | google__jax | jax/_src/internal_test_util/test_harnesses.py | {
"start": 3791,
"end": 10452
} | class ____:
"""Specifies inputs and callable for a test harness.
See the module docstring for an introduction to harnesses.
A harness is conceptually a callable and a list of arguments, that together
exercise a use case. The harness can optionally have additional parameters
that can be used by the test.
The arguments are specified through argument descriptors. An argument
descriptor can be:
* a numeric value or ndarray, or
* an instance of ``RandArg(shape, dtype)`` to be used with a PRNG to
generate random tensor of the given shape and type, or
* an instance of ``CustomArg(fun)`` to be used with a PRNG, or
* an instance of ``StaticArg(value)``. Often these are the non-array
arguments, e.g., a shape.
The given callable will be passed one argument corresponding to each
argument descriptor, e.g., `harness.fun(* harness.args_maker(rng))`.
However, in many applications we only care about the non-static arguments.
For that purpose, you can use `harness.dyn_fun(*
harness.dyn_args_maked(rng))`,
where `harness.dyn_fun` is `harness.fun` specialized to the static arguments.
For example, a harness for ``lax.take(arr, indices, axis=None)`` may want
to expose as external (non-static) argument the array and the indices, and
keep the axis as a static argument (technically specializing the `take` to
a axis):
Harness(lax.slice_p,
f"take_axis={axis}",
lax.take,
[RandArg((2, 4), np.float32), np.array([-1, 0, 1]),
StaticArg(axis)],
axis=axis)
Each harness can have a list of Limitations that describe the cases when
the harness may not be fully implemented.
"""
# The group name most often is the primitive name.
group_name: str
# Descriptive name of the harness, used as a testcase_name. Unique in a group.
# Will be sanitized to work with -k test filtering.
name: str
# The function taking all arguments (static and dynamic).
fun: Callable
# Describes how to construct arguments, see the class docstring.
arg_descriptors: Sequence[ArgDescriptor]
dtype: DType
# A set of limitations describing the cases that are not supported or
# partially implemented in JAX for this harness.
jax_unimplemented: Sequence[Limitation]
rng_factory: Callable
# Carry some arbitrary parameters that the test can access.
params: dict[str, Any]
def __init__(self,
group_name,
name,
fun,
arg_descriptors,
*,
dtype,
rng_factory=jtu.rand_default,
jax_unimplemented: Sequence[Limitation] = (),
**params):
"""See class docstring."""
self.group_name = jtu.sanitize_test_name(group_name)
self.name = jtu.sanitize_test_name(name)
self.fullname = self.name if self.group_name is None else f"{self.group_name}_{self.name}"
self.fun = fun
self.arg_descriptors = arg_descriptors
self.rng_factory = rng_factory
self.jax_unimplemented = jax_unimplemented
self.dtype = dtype
self.params = params
def __str__(self):
return self.fullname
def _arg_maker(self, arg_descriptor, rng: Rng):
if isinstance(arg_descriptor, StaticArg):
return arg_descriptor.value
if isinstance(arg_descriptor, RandArg):
return self.rng_factory(rng)(arg_descriptor.shape, arg_descriptor.dtype)
if isinstance(arg_descriptor, CustomArg):
return arg_descriptor.make(rng)
return arg_descriptor
def args_maker(self, rng: Rng) -> Sequence:
"""All-argument maker, including the static ones."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors]
def dyn_args_maker(self, rng: Rng) -> Sequence:
"""A dynamic-argument maker, for use with `dyn_fun`."""
return [
self._arg_maker(ad, rng)
for ad in self.arg_descriptors
if not isinstance(ad, StaticArg)
]
def dyn_fun(self, *dyn_args):
"""Invokes `fun` given just the dynamic arguments."""
all_args = self._args_from_dynargs(dyn_args)
return self.fun(*all_args)
def _args_from_dynargs(self, dyn_args: Sequence) -> Sequence:
"""All arguments, including the static ones."""
next_dynamic_argnum = 0
all_args = []
for ad in self.arg_descriptors:
if isinstance(ad, StaticArg):
all_args.append(ad.value)
else:
all_args.append(dyn_args[next_dynamic_argnum])
next_dynamic_argnum += 1
return all_args
def filter(self,
device_under_test: str,
*,
include_jax_unimpl: bool = False,
one_containing: str | None = None) -> bool:
if not include_jax_unimpl:
if any(
device_under_test in l.devices
for l in self.jax_unimplemented
if l.filter(device=device_under_test, dtype=self.dtype)
):
return False
if one_containing is not None and one_containing not in self.fullname:
return False
return True
def dtypes_to_str(dtype_list: Sequence[DType], empty_means_all=False) -> str:
"""User-friendly description of a set of dtypes"""
if not dtype_list and empty_means_all:
return "all"
names = {np.dtype(dt).name for dt in dtype_list}
signed = {"int8", "int16", "int32", "int64"}
if signed <= names:
names = (names - signed) | {"signed"}
integers = {"uint8", "uint16", "uint32", "uint64"}
if integers <= names:
names = (names - integers) | {"unsigned"}
integer = {"signed", "unsigned"}
if integer <= names:
names = (names - integer) | {"integer"}
floating = {"bfloat16", "float16", "float32", "float64"}
if floating <= names:
names = (names - floating) | {"floating"}
complex = {"complex64", "complex128"}
if complex <= names:
names = (names - complex) | {"complex"}
inexact = {"floating", "complex"}
if inexact <= names:
names = (names - inexact) | {"inexact"}
all_types = {"integer", "inexact", "bool"}
if all_types <= names:
names = (names - all_types) | {"all"}
return ", ".join(sorted(names))
##### All harnesses in this file.
all_harnesses: list[Harness] = []
def define(
group_name,
name,
fun,
arg_descriptors,
*,
dtype,
rng_factory=jtu.rand_default,
jax_unimplemented: Sequence[Limitation] = (),
**params):
"""Defines a harness and stores it in `all_harnesses`. See Harness."""
group_name = str(group_name)
h = Harness(
group_name,
name,
fun,
arg_descriptors,
rng_factory=rng_factory,
jax_unimplemented=jax_unimplemented,
dtype=dtype,
**params)
all_harnesses.append(h)
| Harness |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 174565,
"end": 177329
} | class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(
self,
client_id: str,
client_secret: str,
access_token: Optional[str] = None,
refresh_token: Optional[str] = None,
):
self.auth_type = "OAuth"
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.access_token = check.opt_str_param(access_token, "access_token")
self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
class UsernameAndPassword:
@public
def __init__(self, username: str, password: str):
self.auth_type = "username/password"
self.username = check.str_param(username, "username")
self.password = check.str_param(password, "password")
@public
def __init__(
self,
name: str,
credentials: Union["SnowflakeSource.OAuth20", "SnowflakeSource.UsernameAndPassword"],
host: str,
role: str,
warehouse: str,
database: str,
schema: str,
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Source for Snowflake.
Documentation can be found at https://docs.airbyte.com/integrations/sources/snowflake
Args:
name (str): The name of the destination.
host (str): The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com).
role (str): The role you created for Airbyte to access Snowflake.
warehouse (str): The warehouse you created for Airbyte to access data.
database (str): The database you created for Airbyte to access data.
schema (str): The source Snowflake schema tables.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
"""
self.credentials = check.inst_param(
credentials,
"credentials",
(SnowflakeSource.OAuth20, SnowflakeSource.UsernameAndPassword),
)
self.host = check.str_param(host, "host")
self.role = check.str_param(role, "role")
self.warehouse = check.str_param(warehouse, "warehouse")
self.database = check.str_param(database, "database")
self.schema = check.str_param(schema, "schema")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
super().__init__("Snowflake", name)
| SnowflakeSource |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py | {
"start": 19300,
"end": 26032
} | class ____(nn.Module):
"""Image embedding."""
def __init__(self, config: Phi4MultimodalConfig):
super().__init__()
self.config = config
self.layer_idx = config.vision_config.feature_layer
self.crop_size = config.vision_config.crop_size
self.image_dim_out = config.vision_config.hidden_size
n_patches = config.vision_config.image_size // config.vision_config.patch_size
if n_patches % 2 != 0:
self.img_processor_padding = nn.ReflectionPad2d((0, 1, 0, 1))
n_patches += 1
self.num_img_tokens = (n_patches // 2) ** 2
self.drop = nn.Dropout(config.embd_pdrop)
self.img_processor = Phi4MultimodalVisionModel._from_config(config.vision_config)
self.image_token_compression = nn.AvgPool2d(kernel_size=2, stride=2)
self.img_projection_up = nn.Linear(self.image_dim_out, config.hidden_size)
self.img_projection_down = nn.Linear(config.hidden_size, config.hidden_size)
self.global_img_feature_extensor = nn.Parameter(torch.zeros([1, 1, self.image_dim_out]))
self.sub_img_feature_extensor = nn.Parameter(torch.zeros([1, 1, 1, self.image_dim_out]))
def get_img_features(self, img_embeds: torch.FloatTensor, attention_mask=None) -> torch.FloatTensor:
img_processor_output = self.img_processor(
img_embeds, patch_attention_mask=attention_mask, output_hidden_states=True
)
img_feature = img_processor_output.hidden_states[self.layer_idx]
patch_feature = img_feature
# reshape to 2D tensor
width = int(math.sqrt(patch_feature.size(1)))
patch_feature = patch_feature.view(-1, width, width, patch_feature.size(-1))
# convert to NCHW
patch_feature = patch_feature.permute(0, 3, 1, 2)
if getattr(self, "img_processor_padding", None) is not None:
patch_feature = self.img_processor_padding(patch_feature)
patch_feature = self.image_token_compression(patch_feature)
# convert to NHWC
patch_feature = patch_feature.permute(0, 2, 3, 1)
patch_feature = patch_feature.view(-1, patch_feature.size(1) * patch_feature.size(2), patch_feature.size(-1))
return patch_feature
def forward(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.Tensor,
image_pixel_values: torch.FloatTensor,
image_sizes: Optional[torch.Tensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
image_pixel_values = image_pixel_values.to(self.img_processor.embeddings.patch_embedding.weight.dtype)
target_device = self.img_projection_up.bias.device
target_dtype = self.img_projection_up.bias.dtype
batch_size = image_pixel_values.shape[0]
img_features = self.get_img_features(
image_pixel_values.flatten(0, 1),
attention_mask=image_attention_mask.flatten(0, 1).to(dtype=bool, device=target_device),
)
base_feat_size = int(np.sqrt(img_features.shape[1]))
img_features = img_features.view(batch_size, -1, base_feat_size**2, self.image_dim_out)
image_sizes = image_sizes.view(-1, 2)
output_imgs = []
for idx in range(batch_size):
height, width = image_sizes[idx]
height_ratio = height // self.crop_size
width_ratio = width // self.crop_size
area_ratio = height_ratio * width_ratio
global_img = img_features[idx, :1]
global_img = global_img.reshape(1, base_feat_size, base_feat_size, self.image_dim_out).contiguous()
temporary_extensor = self.sub_img_feature_extensor.repeat(1, base_feat_size, 1, 1)
global_img = torch.cat([global_img, temporary_extensor], dim=2).reshape(1, -1, self.image_dim_out)
sub_img = img_features[idx, 1:]
sub_img = sub_img[:area_ratio]
sub_img = (
sub_img.reshape(height_ratio, width_ratio, base_feat_size, base_feat_size, self.image_dim_out)
.transpose(1, 2)
.reshape(1, height_ratio * base_feat_size, width_ratio * base_feat_size, self.image_dim_out)
.contiguous()
)
if image_attention_mask is not None:
reshaped_image_attention_mask = (
image_attention_mask[idx, 1 : area_ratio + 1, 0::2, 0::2]
.reshape(height_ratio, width_ratio, base_feat_size, base_feat_size)
.transpose(1, 2)
.reshape(1, height_ratio * base_feat_size, width_ratio * base_feat_size)
)
useful_height = int(reshaped_image_attention_mask[0, :, 0].sum().item())
useful_width = int(reshaped_image_attention_mask[0, 0, :].sum().item())
sub_img = sub_img[:, :useful_height, :useful_width]
temporary_extensor = self.sub_img_feature_extensor.repeat(1, useful_height, 1, 1)
else:
temporary_extensor = self.sub_img_feature_extensor.repeat(1, height_ratio * base_feat_size, 1, 1)
sub_img = torch.cat([sub_img, temporary_extensor], dim=2).reshape(1, -1, self.image_dim_out)
# Merge global and sub
output_imgs.append(torch.cat([sub_img, self.global_img_feature_extensor, global_img], dim=1))
img_set_tensor = []
for output_img in output_imgs:
output_img = output_img.to(device=target_device, dtype=target_dtype)
img_feature_proj = self.img_projection_up(output_img)
img_feature_proj = nn.functional.gelu(img_feature_proj)
img_feature_proj = self.img_projection_down(img_feature_proj)
img_set_tensor.append(img_feature_proj)
merged_img_set_tensor = torch.cat(img_set_tensor, dim=1).squeeze(0)
merged_img_set_tensor = merged_img_set_tensor.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device)
with torch.no_grad():
positions_tuple = torch.nonzero(input_ids == self.config.vision_config.image_token_id, as_tuple=True)
# Temporarily disable autocast to avoid issue on bf16 tensors
# Ref: https://github.com/pytorch/pytorch/issues/132715
with torch.autocast(device_type=inputs_embeds.device.type, enabled=False):
image_embeds = inputs_embeds.index_put(
indices=positions_tuple, values=merged_img_set_tensor, accumulate=False
)
image_embeds = self.drop(image_embeds)
return image_embeds
########################################################## AUDIO #############################################
| Phi4MultimodalImageEmbedding |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 34704,
"end": 37422
} | class ____(BaseAsyncRealtimeConnectionResource):
async def create(
self, *, event_id: str | Omit = omit, response: RealtimeResponseCreateParamsParam | Omit = omit
) -> None:
"""
This event instructs the server to create a Response, which means triggering
model inference. When in Server VAD mode, the server will create Responses
automatically.
A Response will include at least one Item, and may have two, in which case
the second will be a function call. These Items will be appended to the
conversation history by default.
The server will respond with a `response.created` event, events for Items
and content created, and finally a `response.done` event to indicate the
Response is complete.
The `response.create` event includes inference configuration like
`instructions` and `tools`. If these are set, they will override the Session's
configuration for this Response only.
Responses can be created out-of-band of the default Conversation, meaning that they can
have arbitrary input, and it's possible to disable writing the output to the Conversation.
Only one Response can write to the default Conversation at a time, but otherwise multiple
Responses can be created in parallel. The `metadata` field is a good way to disambiguate
multiple simultaneous Responses.
Clients can set `conversation` to `none` to create a Response that does not write to the default
Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting
raw Items and references to existing Items.
"""
await self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
)
)
async def cancel(self, *, event_id: str | Omit = omit, response_id: str | Omit = omit) -> None:
"""Send this event to cancel an in-progress response.
The server will respond
with a `response.done` event with a status of `response.status=cancelled`. If
there is no response to cancel, the server will respond with an error. It's safe
to call `response.cancel` even if no response is in progress, an error will be
returned the session will remain unaffected.
"""
await self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
)
)
| AsyncRealtimeResponseResource |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.