language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 18865,
"end": 22688
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(
T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)
)
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config, layer_idx=layer_idx))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
return_dict=True,
cache_position=None,
):
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = self_attention_outputs[0]
attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
past_key_values=past_key_values,
query_length=cache_position[-1] + 1,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[1:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
return (
outputs + attention_outputs
) # hidden-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
| T5Block |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 36880,
"end": 37007
} | class ____(UserDefinedClassVariable):
@property
def fn(self):
return self.value
| UserDefinedExceptionClassVariable |
python | pandas-dev__pandas | pandas/tests/indexes/test_any_index.py | {
"start": 5041,
"end": 5535
} | class ____:
def test_argmax_axis_invalid(self, index):
# GH#23081
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
with pytest.raises(ValueError, match=msg):
index.argmax(axis=1)
with pytest.raises(ValueError, match=msg):
index.argmin(axis=2)
with pytest.raises(ValueError, match=msg):
index.min(axis=-2)
with pytest.raises(ValueError, match=msg):
index.max(axis=-3)
| TestReductions |
python | nedbat__coveragepy | tests/test_config.py | {
"start": 687,
"end": 22884
} | class ____(CoverageTest):
"""Tests of the different sources of configuration settings."""
def test_default_config(self) -> None:
# Just constructing a coverage() object gets the right defaults.
cov = coverage.Coverage()
assert not cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == ".coverage"
def test_arguments(self) -> None:
# Arguments to the constructor are applied to the configuration.
cov = coverage.Coverage(timid=True, data_file="fooey.dat", concurrency="multiprocessing")
assert cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == "fooey.dat"
assert cov.config.concurrency == ["multiprocessing"]
def test_config_file(self) -> None:
# A .coveragerc file will be read into the configuration.
self.make_file(
".coveragerc",
"""\
# This is just a bogus .rc file for testing.
[run]
timid = True
data_file = .hello_kitty.data
""",
)
cov = coverage.Coverage()
assert cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == ".hello_kitty.data"
@pytest.mark.parametrize("file_class", FilePathClasses)
def test_named_config_file(self, file_class: FilePathType) -> None:
# You can name the config file what you like.
self.make_file(
"my_cov.ini",
"""\
[run]
timid = True
; I wouldn't really use this as a data file...
data_file = delete.me
""",
)
cov = coverage.Coverage(config_file=file_class("my_cov.ini"))
assert cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == "delete.me"
def test_toml_config_file(self) -> None:
# A pyproject.toml file will be read into the configuration.
self.make_file(
"pyproject.toml",
"""\
# This is just a bogus toml file for testing.
[tool.somethingelse]
authors = ["Joe D'Ávila <joe@gmail.com>"]
[tool.coverage.run]
concurrency = ["thread", "eventlet"]
timid = true
data_file = ".hello_kitty.data"
plugins = ["plugins.a_plugin"]
[tool.coverage.report]
precision = 3
fail_under = 90.5
[tool.coverage.html]
title = "tabblo & «ταБЬℓσ»"
[tool.coverage.plugins.a_plugin]
hello = "world"
""",
)
cov = coverage.Coverage()
assert cov.config.timid
assert not cov.config.branch
assert cov.config.concurrency == ["thread", "eventlet"]
assert cov.config.data_file == ".hello_kitty.data"
assert cov.config.plugins == ["plugins.a_plugin"]
assert cov.config.precision == 3
assert cov.config.html_title == "tabblo & «ταБЬℓσ»"
assert cov.config.fail_under == 90.5
assert cov.config.get_plugin_options("plugins.a_plugin") == {"hello": "world"}
def test_toml_ints_can_be_floats(self) -> None:
# Test that our class doesn't reject integers when loading floats
self.make_file(
"pyproject.toml",
"""\
# This is just a bogus toml file for testing.
[tool.coverage.report]
fail_under = 90
""",
)
cov = coverage.Coverage()
assert cov.config.fail_under == 90
assert isinstance(cov.config.fail_under, float)
def test_ignored_config_file(self) -> None:
# You can disable reading the .coveragerc file.
self.make_file(
".coveragerc",
"""\
[run]
timid = True
data_file = delete.me
""",
)
cov = coverage.Coverage(config_file=False)
assert not cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == ".coverage"
def test_config_file_then_args(self) -> None:
# The arguments override the .coveragerc file.
self.make_file(
".coveragerc",
"""\
[run]
timid = True
data_file = weirdo.file
""",
)
cov = coverage.Coverage(timid=False, data_file=".mycov")
assert not cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == ".mycov"
def test_data_file_from_environment(self) -> None:
# There's an environment variable for the data_file.
self.make_file(
".coveragerc",
"""\
[run]
timid = True
data_file = weirdo.file
""",
)
self.set_environ("COVERAGE_FILE", "fromenv.dat")
cov = coverage.Coverage()
assert cov.config.data_file == "fromenv.dat"
# But the constructor arguments override the environment variable.
cov = coverage.Coverage(data_file="fromarg.dat")
assert cov.config.data_file == "fromarg.dat"
def test_debug_from_environment(self) -> None:
self.make_file(
".coveragerc",
"""\
[run]
debug = dataio, pids
""",
)
self.set_environ("COVERAGE_DEBUG", "callers, fooey")
cov = coverage.Coverage()
assert cov.config.debug == ["dataio", "pids", "callers", "fooey"]
def test_rcfile_from_environment(self) -> None:
self.make_file(
"here.ini",
"""\
[run]
data_file = overthere.dat
""",
)
self.set_environ("COVERAGE_RCFILE", "here.ini")
cov = coverage.Coverage()
assert cov.config.data_file == "overthere.dat"
def test_missing_rcfile_from_environment(self) -> None:
self.set_environ("COVERAGE_RCFILE", "nowhere.ini")
msg = "Couldn't read 'nowhere.ini' as a config file"
with pytest.raises(ConfigError, match=msg):
coverage.Coverage()
@pytest.mark.parametrize("force", [False, True])
def test_force_environment(self, force: bool) -> None:
self.make_file(
".coveragerc",
"""\
[run]
debug = dataio, pids
""",
)
self.make_file(
"force.ini",
"""\
[run]
debug = callers, fooey
""",
)
if force:
self.set_environ("COVERAGE_FORCE_CONFIG", "force.ini")
cov = coverage.Coverage()
if force:
assert cov.config.debug == ["callers", "fooey"]
else:
assert cov.config.debug == ["dataio", "pids"]
@pytest.mark.parametrize(
"bad_config, msg",
[
("[run]\ntimid = maybe?\n", r"maybe[?]"),
("timid = 1\n", r"no section headers"),
("[run\n", r"\[run"),
(
"[report]\nexclude_lines = foo(\n",
r"Invalid \[report\].exclude_lines value 'foo\(': "
+ r"(unbalanced parenthesis|missing \))",
),
(
"[report]\nexclude_also = foo(\n",
r"Invalid \[report\].exclude_also value 'foo\(': "
+ r"(unbalanced parenthesis|missing \))",
),
(
"[report]\npartial_branches = foo[\n",
r"Invalid \[report\].partial_branches value 'foo\[': "
+ r"(unexpected end of regular expression|unterminated character set)",
),
(
"[report]\npartial_also = foo[\n",
r"Invalid \[report\].partial_also value 'foo\[': "
+ r"(unexpected end of regular expression|unterminated character set)",
),
(
"[report]\npartial_branches_always = foo***\n",
r"Invalid \[report\].partial_branches_always value "
+ r"'foo\*\*\*': "
+ r"multiple repeat",
),
],
)
def test_parse_errors(self, bad_config: str, msg: str) -> None:
# Im-parsable values raise ConfigError, with details.
self.make_file(".coveragerc", bad_config)
with pytest.raises(ConfigError, match=msg):
coverage.Coverage()
@pytest.mark.parametrize(
"bad_config, msg",
[
('[tool.coverage.run]\ntimid = "maybe?"\n', r"maybe[?]"),
("[tool.coverage.run\n", None),
(
'[tool.coverage.report]\nexclude_lines = ["foo("]\n',
r"Invalid \[tool.coverage.report\].exclude_lines value 'foo\(': "
+ r"(unbalanced parenthesis|missing \))",
),
(
'[tool.coverage.report]\nexclude_also = ["foo("]\n',
r"Invalid \[tool.coverage.report\].exclude_also value 'foo\(': "
+ r"(unbalanced parenthesis|missing \))",
),
(
'[tool.coverage.report]\npartial_branches = ["foo["]\n',
r"Invalid \[tool.coverage.report\].partial_branches value 'foo\[': "
+ r"(unexpected end of regular expression|unterminated character set)",
),
(
'[tool.coverage.report]\npartial_also = ["foo["]\n',
r"Invalid \[tool.coverage.report\].partial_also value 'foo\[': "
+ r"(unexpected end of regular expression|unterminated character set)",
),
(
'[tool.coverage.report]\npartial_branches_always = ["foo***"]\n',
r"Invalid \[tool.coverage.report\].partial_branches_always value "
+ r"'foo\*\*\*': "
+ r"multiple repeat",
),
('[tool.coverage.run]\nconcurrency="foo"', "not a list"),
("[tool.coverage.report]\nprecision=1.23", "not an integer"),
('[tool.coverage.report]\nfail_under="s"', "couldn't convert to a float"),
],
)
def test_toml_parse_errors(self, bad_config: str, msg: str) -> None:
# Im-parsable values raise ConfigError, with details.
self.make_file("pyproject.toml", bad_config)
with pytest.raises(ConfigError, match=msg):
coverage.Coverage()
def test_environment_vars_in_config(self) -> None:
# Config files can have $envvars in them.
self.make_file(
".coveragerc",
"""\
[run]
data_file = $DATA_FILE.fooey
branch = $OKAY
[report]
exclude_lines =
the_$$one
another${THING}
x${THING}y
x${NOTHING}y
huh$${X}what
""",
)
self.set_environ("DATA_FILE", "hello-world")
self.set_environ("THING", "ZZZ")
self.set_environ("OKAY", "yes")
cov = coverage.Coverage()
assert cov.config.data_file == "hello-world.fooey"
assert cov.config.branch is True
assert cov.config.exclude_list == ["the_$one", "anotherZZZ", "xZZZy", "xy", "huh${X}what"]
def test_environment_vars_in_toml_config(self) -> None:
# Config files can have $envvars in them.
self.make_file(
"pyproject.toml",
"""\
[tool.coverage.run]
data_file = "$DATA_FILE.fooey"
branch = "$BRANCH"
[tool.coverage.report]
precision = "$DIGITS"
fail_under = "$FAIL_UNDER"
exclude_lines = [
"the_$$one",
"another${THING}",
"x${THING}y",
"x${NOTHING}y",
"huh$${X}what",
]
[othersection]
# This reproduces the failure from https://github.com/coveragepy/coveragepy/issues/1481
# When OTHER has a backslash that isn't a valid escape, like \\z (see below).
something = "if [ $OTHER ]; then printf '%s\\n' 'Hi'; fi"
""",
)
self.set_environ("BRANCH", "true")
self.set_environ("DIGITS", "3")
self.set_environ("FAIL_UNDER", "90.5")
self.set_environ("DATA_FILE", "hello-world")
self.set_environ("THING", "ZZZ")
self.set_environ("OTHER", "hi\\zebra")
cov = coverage.Coverage()
assert cov.config.branch is True
assert cov.config.precision == 3
assert cov.config.data_file == "hello-world.fooey"
assert cov.config.exclude_list == ["the_$one", "anotherZZZ", "xZZZy", "xy", "huh${X}what"]
def test_tilde_in_config(self) -> None:
# Config entries that are file paths can be tilde-expanded.
self.make_file(
".coveragerc",
"""\
[run]
data_file = ~/data.file
[html]
directory = ~joe/html_dir
[json]
output = ~/json/output.json
[lcov]
output = ~/lcov/~foo.lcov
[xml]
output = ~/somewhere/xml.out
[report]
# Strings that aren't file paths are not tilde-expanded.
exclude_lines =
~/data.file
~joe/html_dir
[paths]
mapping =
~/src
~joe/source
""",
)
self.assert_tilde_results()
def test_tilde_in_toml_config(self) -> None:
# Config entries that are file paths can be tilde-expanded.
self.make_file(
"pyproject.toml",
"""\
[tool.coverage.run]
data_file = "~/data.file"
[tool.coverage.html]
directory = "~joe/html_dir"
[tool.coverage.json]
output = "~/json/output.json"
[tool.coverage.lcov]
output = "~/lcov/~foo.lcov"
[tool.coverage.xml]
output = "~/somewhere/xml.out"
[tool.coverage.report]
# Strings that aren't file paths are not tilde-expanded.
exclude_lines = [
"~/data.file",
"~joe/html_dir",
]
[tool.coverage.paths]
mapping = [
"~/src",
"~joe/source",
]
""",
)
self.assert_tilde_results()
def assert_tilde_results(self) -> None:
"""Common assertions for two tilde tests."""
def expanduser(s: str) -> str:
"""Fake tilde expansion"""
s = s.replace("~/", "/Users/me/")
s = s.replace("~joe/", "/Users/joe/")
return s
with mock.patch.object(
coverage.config.os.path, # type: ignore[attr-defined]
"expanduser",
new=expanduser,
):
cov = coverage.Coverage()
assert cov.config.data_file == "/Users/me/data.file"
assert cov.config.html_dir == "/Users/joe/html_dir"
assert cov.config.json_output == "/Users/me/json/output.json"
assert cov.config.lcov_output == "/Users/me/lcov/~foo.lcov"
assert cov.config.xml_output == "/Users/me/somewhere/xml.out"
assert cov.config.exclude_list == ["~/data.file", "~joe/html_dir"]
assert cov.config.paths == {"mapping": ["/Users/me/src", "/Users/joe/source"]}
def test_tweaks_after_constructor(self) -> None:
# set_option can be used after construction to affect the config.
cov = coverage.Coverage(timid=True, data_file="fooey.dat")
cov.set_option("run:timid", False)
assert not cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == "fooey.dat"
assert not cov.get_option("run:timid")
assert not cov.get_option("run:branch")
assert cov.get_option("run:data_file") == "fooey.dat"
def test_tweaks_paths_after_constructor(self) -> None:
self.make_file(
".coveragerc",
"""\
[paths]
first =
/first/1
/first/2
second =
/second/a
/second/b
""",
)
old_paths = {
"first": ["/first/1", "/first/2"],
"second": ["/second/a", "/second/b"],
}
cov = coverage.Coverage()
paths = cov.get_option("paths")
assert paths == old_paths
new_paths = {
"magic": ["src", "ok"],
}
cov.set_option("paths", new_paths)
assert cov.get_option("paths") == new_paths
def test_tweak_error_checking(self) -> None:
# Trying to set an unknown config value raises an error.
cov = coverage.Coverage()
with pytest.raises(ConfigError, match="No such option: 'run:xyzzy'"):
cov.set_option("run:xyzzy", 12)
with pytest.raises(ConfigError, match="No such option: 'xyzzy:foo'"):
cov.set_option("xyzzy:foo", 12)
with pytest.raises(ConfigError, match="No such option: 'run:xyzzy'"):
_ = cov.get_option("run:xyzzy")
with pytest.raises(ConfigError, match="No such option: 'xyzzy:foo'"):
_ = cov.get_option("xyzzy:foo")
def test_tweak_plugin_options(self) -> None:
# Plugin options have a more flexible syntax.
cov = coverage.Coverage()
cov.set_option("run:plugins", ["fooey.plugin", "xyzzy.coverage.plugin"])
cov.set_option("fooey.plugin:xyzzy", 17)
cov.set_option("xyzzy.coverage.plugin:plugh", ["a", "b"])
with pytest.raises(ConfigError, match="No such option: 'no_such.plugin:foo'"):
cov.set_option("no_such.plugin:foo", 23)
assert cov.get_option("fooey.plugin:xyzzy") == 17
assert cov.get_option("xyzzy.coverage.plugin:plugh") == ["a", "b"]
with pytest.raises(ConfigError, match="No such option: 'no_such.plugin:foo'"):
_ = cov.get_option("no_such.plugin:foo")
def test_unknown_option(self) -> None:
self.make_file(
".coveragerc",
"""\
[run]
xyzzy = 17
""",
)
msg = r"Unrecognized option '\[run\] xyzzy=' in config file .coveragerc"
with pytest.warns(CoverageWarning, match=msg):
_ = coverage.Coverage()
def test_unknown_option_toml(self) -> None:
self.make_file(
"pyproject.toml",
"""\
[tool.coverage.run]
xyzzy = 17
""",
)
msg = r"Unrecognized option '\[tool.coverage.run\] xyzzy=' in config file pyproject.toml"
with pytest.warns(CoverageWarning, match=msg):
_ = coverage.Coverage()
def test_unknown_patch(self) -> None:
self.make_file("foo.py", "a = 1")
self.make_file(
".coveragerc",
"""\
[run]
patch =
_exit
xyzzy
""",
)
msg = "Unknown patch 'xyzzy'"
with pytest.raises(ConfigError, match=msg):
cov = coverage.Coverage()
self.start_import_stop(cov, "foo")
def test_misplaced_option(self) -> None:
self.make_file(
".coveragerc",
"""\
[report]
branch = True
""",
)
msg = r"Unrecognized option '\[report\] branch=' in config file .coveragerc"
with pytest.warns(CoverageWarning, match=msg):
_ = coverage.Coverage()
def test_unknown_option_in_other_ini_file(self) -> None:
self.make_file(
"setup.cfg",
"""\
[coverage:run]
huh = what?
""",
)
msg = r"Unrecognized option '\[coverage:run\] huh=' in config file setup.cfg"
with pytest.warns(CoverageWarning, match=msg):
_ = coverage.Coverage()
def test_exceptions_from_missing_things(self) -> None:
self.make_file(
"config.ini",
"""\
[run]
branch = True
""",
)
config = HandyConfigParser(True)
config.read(["config.ini"])
with pytest.raises(ConfigError, match="No section: 'xyzzy'"):
config.options("xyzzy")
with pytest.raises(ConfigError, match="No option 'foo' in section: 'xyzzy'"):
config.get("xyzzy", "foo")
def test_exclude_also(self) -> None:
self.make_file(
"pyproject.toml",
"""\
[tool.coverage.report]
exclude_also = ["foobar", "raise .*Error"]
""",
)
cov = coverage.Coverage()
expected = coverage.config.DEFAULT_EXCLUDE + ["foobar", "raise .*Error"]
assert cov.config.exclude_list == expected
def test_partial_also(self) -> None:
self.make_file(
"pyproject.toml",
"""\
[tool.coverage.report]
partial_also = ["foobar", "raise .*Error"]
""",
)
cov = coverage.Coverage()
expected = coverage.config.DEFAULT_PARTIAL + ["foobar", "raise .*Error"]
assert cov.config.partial_list == expected
def test_core_option(self) -> None:
# Test that the core option can be set in the configuration file.
self.del_environ("COVERAGE_CORE")
cov = coverage.Coverage()
default_core = cov.config.core
core_to_set = "ctrace" if default_core == "pytrace" else "pytrace"
self.make_file(
".coveragerc",
f"""\
[run]
core = {core_to_set}
""",
)
cov = coverage.Coverage()
assert cov.config.core == core_to_set
os.remove(".coveragerc")
self.make_file(
"pyproject.toml",
f"""\
[tool.coverage.run]
core = "{core_to_set}"
""",
)
cov = coverage.Coverage()
assert cov.config.core == core_to_set
| ConfigTest |
python | TheAlgorithms__Python | data_structures/binary_tree/basic_binary_tree.py | {
"start": 700,
"end": 2678
} | class ____:
root: Node
def __iter__(self) -> Iterator[int]:
return iter(self.root)
def __len__(self) -> int:
return len(self.root)
@classmethod
def small_tree(cls) -> BinaryTree:
"""
Return a small binary tree with 3 nodes.
>>> binary_tree = BinaryTree.small_tree()
>>> len(binary_tree)
3
>>> list(binary_tree)
[1, 2, 3]
"""
binary_tree = BinaryTree(Node(2))
binary_tree.root.left = Node(1)
binary_tree.root.right = Node(3)
return binary_tree
@classmethod
def medium_tree(cls) -> BinaryTree:
"""
Return a medium binary tree with 3 nodes.
>>> binary_tree = BinaryTree.medium_tree()
>>> len(binary_tree)
7
>>> list(binary_tree)
[1, 2, 3, 4, 5, 6, 7]
"""
binary_tree = BinaryTree(Node(4))
binary_tree.root.left = two = Node(2)
two.left = Node(1)
two.right = Node(3)
binary_tree.root.right = five = Node(5)
five.right = six = Node(6)
six.right = Node(7)
return binary_tree
def depth(self) -> int:
"""
Returns the depth of the tree
>>> BinaryTree(Node(1)).depth()
1
>>> BinaryTree.small_tree().depth()
2
>>> BinaryTree.medium_tree().depth()
4
"""
return self._depth(self.root)
def _depth(self, node: Node | None) -> int:
if not node:
return 0
return 1 + max(self._depth(node.left), self._depth(node.right))
def is_full(self) -> bool:
"""
Returns True if the tree is full
>>> BinaryTree(Node(1)).is_full()
True
>>> BinaryTree.small_tree().is_full()
True
>>> BinaryTree.medium_tree().is_full()
False
"""
return self.root.is_full()
if __name__ == "__main__":
import doctest
doctest.testmod()
| BinaryTree |
python | walkccc__LeetCode | solutions/2839. Check if Strings Can be Made Equal With Operations I/2839.py | {
"start": 0,
"end": 459
} | class ____:
def canBeEqual(self, s1: str, s2: str) -> bool:
def swappedStrings(s: str) -> list[str]:
chars = list(s)
return [chars,
''.join([chars[2], chars[1], chars[0], chars[3]]),
''.join([chars[0], chars[3], chars[2], chars[1]]),
''.join([chars[2], chars[3], chars[0], chars[1]])]
return any(a == b
for a in swappedStrings(s1)
for b in swappedStrings(s2))
| Solution |
python | prabhupant__python-ds | data_structures/linked_list/pair_swap.py | {
"start": 0,
"end": 428
} | class ____():
def __init__(self, val):
self.val = val
self.next = None
def pair_swap(head):
if head == None or head.next == None:
return head
root = head.next
curr = head
prev = Node(0)
while curr.next:
curr.next = curr.next.next
curr.next.next = curr
prev.next = curr.next.next
prev = curr.next
curr = curr.next.next
return root
| Node |
python | RaRe-Technologies__gensim | gensim/topic_coherence/text_analysis.py | {
"start": 4588,
"end": 6930
} | class ____(BaseAnalyzer):
"""A BaseAnalyzer that uses a Dictionary, hence can translate tokens to counts.
The standard BaseAnalyzer can only deal with token ids since it doesn't have the token2id
mapping.
Attributes
----------
relevant_words : set
Set of words that occurrences should be accumulated for.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary based on text
token2id : dict
Mapping from :class:`~gensim.corpora.dictionary.Dictionary`
"""
def __init__(self, relevant_ids, dictionary):
"""
Parameters
----------
relevant_ids : dict
Mapping
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary based on text
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import text_analysis
>>> from gensim.corpora.dictionary import Dictionary
>>>
>>> ids = {1: 'foo', 2: 'bar'}
>>> dictionary = Dictionary([['foo', 'bar', 'baz'], ['foo', 'bar', 'bar', 'baz']])
>>> udict = text_analysis.UsesDictionary(ids, dictionary)
>>>
>>> print(udict.relevant_words)
set([u'foo', u'baz'])
"""
super(UsesDictionary, self).__init__(relevant_ids)
self.relevant_words = _ids_to_words(self.relevant_ids, dictionary)
self.dictionary = dictionary
self.token2id = dictionary.token2id
def get_occurrences(self, word):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self._get_occurrences(self.id2contiguous[word_id])
def _word2_contiguous_id(self, word):
try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self.id2contiguous[word_id]
def get_co_occurrences(self, word1, word2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
word_id1 = self._word2_contiguous_id(word1)
word_id2 = self._word2_contiguous_id(word2)
return self._get_co_occurrences(word_id1, word_id2)
| UsesDictionary |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/typing_extensions/test_backported_types.py | {
"start": 6230,
"end": 6697
} | class ____(TypedDict, total=False):
title: Required[str]
year: int
@given(from_type(OtherMovie))
def test_typeddict_required(value):
assert type(value) == dict
assert set(value).issubset({"title", "year"})
assert isinstance(value["title"], str)
if "year" in value:
assert isinstance(value["year"], int)
def test_typeddict_required_must_have():
assert_all_examples(from_type(OtherMovie), lambda movie: "title" in movie)
| OtherMovie |
python | run-llama__llama_index | llama-index-core/llama_index/core/tools/types.py | {
"start": 487,
"end": 594
} | class ____(BaseModel):
"""Default tool function Schema."""
input: str
@dataclass
| DefaultToolFnSchema |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/transformer.py | {
"start": 2579,
"end": 4193
} | class ____(object):
"""Templated context manager.
This class provides syntactic sugar for a stack of objects of known
type. It allows accessing attributes of the object at the top of the stack
directly against this object, which allows for very terse syntax.
For example, this code:
stack = _StateStack(Foo)
stack.enter()
stack.bar
Is equivalent to:
stack = []
stack.append(Foo())
foo = stack[-1]
foo.bar
See _State for more on how this is used.
Attributes:
type: Any, the type of objects that this stack holds
level: int, the current stack depth
stack: List[Any], the actual stack
value: Any, the instance of the object at the top of the stack
"""
def __init__(self, type_):
# Because we override __setattr__, we need to attach these attributes using
# the superclass' setattr.
object.__setattr__(self, 'type', type_)
object.__setattr__(self, '_stack', [])
if not hasattr(type_, 'no_root'):
self.enter()
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def enter(self):
self._stack.append(self.type())
def exit(self):
self._stack.pop()
@property
def stack(self):
return self._stack
@property
def level(self):
return len(self._stack)
@property
def value(self):
return self._stack[-1]
def __iter__(self):
return iter(self._stack)
def __getattr__(self, key):
return getattr(self._stack[-1], key)
def __setattr__(self, key, value):
setattr(self._stack[-1], key, value)
| _StateStack |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_response_output_text.py | {
"start": 1316,
"end": 1607
} | class ____(BaseModel):
annotations: List[Annotation]
"""Ordered list of annotations attached to the response text."""
text: str
"""Assistant generated text."""
type: Literal["output_text"]
"""Type discriminator that is always `output_text`."""
| ChatKitResponseOutputText |
python | ray-project__ray | python/ray/dag/class_node.py | {
"start": 440,
"end": 3088
} | class ____(DAGNode):
"""Represents an actor creation in a Ray task DAG."""
def __init__(
self,
cls,
cls_args,
cls_kwargs,
cls_options,
other_args_to_resolve=None,
):
self._body = cls
self._last_call: Optional["ClassMethodNode"] = None
super().__init__(
cls_args,
cls_kwargs,
cls_options,
other_args_to_resolve=other_args_to_resolve,
)
if self._contains_input_node():
raise ValueError(
"InputNode handles user dynamic input the DAG, and "
"cannot be used as args, kwargs, or other_args_to_resolve "
"in ClassNode constructor because it is not available at "
"class construction or binding time."
)
def _copy_impl(
self,
new_args: List[Any],
new_kwargs: Dict[str, Any],
new_options: Dict[str, Any],
new_other_args_to_resolve: Dict[str, Any],
):
return ClassNode(
self._body,
new_args,
new_kwargs,
new_options,
other_args_to_resolve=new_other_args_to_resolve,
)
def _execute_impl(self, *args, **kwargs):
"""Executor of ClassNode by ray.remote()
Args and kwargs are to match base class signature, but not in the
implementation. All args and kwargs should be resolved and replaced
with value in bound_args and bound_kwargs via bottom-up recursion when
current node is executed.
"""
return (
ray.remote(self._body)
.options(**self._bound_options)
.remote(*self._bound_args, **self._bound_kwargs)
)
def _contains_input_node(self) -> bool:
"""Check if InputNode is used in children DAGNodes with current node
as the root.
"""
children_dag_nodes = self._get_all_child_nodes()
for child in children_dag_nodes:
if isinstance(child, InputNode):
return True
return False
def __getattr__(self, method_name: str):
# User trying to call .bind() without a bind class method
if method_name == "bind" and "bind" not in dir(self._body):
raise AttributeError(f".bind() cannot be used again on {type(self)} ")
# Raise an error if the method is invalid.
getattr(self._body, method_name)
call_node = _UnboundClassMethodNode(self, method_name, {})
return call_node
def __str__(self) -> str:
return get_dag_node_str(self, str(self._body))
| ClassNode |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 542,
"end": 1476
} | class ____(models.Model):
objects = AuthorManager()
name = models.CharField(max_length=100)
birthday = models.DateTimeField(default=timezone.now)
# issue 2106 - set up a name clash with admin integration form field names
resource = models.SmallIntegerField(null=True, blank=True)
def natural_key(self):
"""
Django pattern function for serializing a model by its natural key
Used only by the ForeignKeyWidget using use_natural_foreign_keys.
"""
return (self.name,)
def __str__(self):
return self.name
def full_clean(self, exclude=None, validate_unique=True):
super().full_clean(exclude, validate_unique)
if exclude is None:
exclude = []
else:
exclude = list(exclude)
if "name" not in exclude and self.name == "123":
raise ValidationError({"name": "'123' is not a valid value"})
| Author |
python | spack__spack | lib/spack/spack/vendor/attr/_make.py | {
"start": 1350,
"end": 1963
} | class ____:
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
``_Nothing`` is a singleton. There is only ever one of it.
.. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
"""
_singleton = None
def __new__(cls):
if _Nothing._singleton is None:
_Nothing._singleton = super().__new__(cls)
return _Nothing._singleton
def __repr__(self):
return "NOTHING"
def __bool__(self):
return False
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
| _Nothing |
python | django__django | tests/admin_views/models.py | {
"start": 26275,
"end": 26362
} | class ____(models.Model):
rname = models.CharField(max_length=20, unique=True)
| Recipe |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 9235,
"end": 9758
} | class ____(unittest.TestCase):
def test_search_k1(self):
# verify codepath for k = 1 and k > 1
d = 64
nb = 0
nt = 1500
nq = 200
(xt, xb, xq) = get_dataset(d, nb, nt, nq)
miq = faiss.MultiIndexQuantizer(d, 2, 6)
miq.train(xt)
D1, I1 = miq.search(xq, 1)
D5, I5 = miq.search(xq, 5)
self.assertEqual(np.abs(I1[:, :1] - I5[:, :1]).max(), 0)
self.assertEqual(np.abs(D1[:, :1] - D5[:, :1]).max(), 0)
| TestMultiIndexQuantizer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 13017,
"end": 13095
} | class ____(Generic[T]):
def method1(self, x: T) -> T:
return x
| Base7 |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks_test.py | {
"start": 32157,
"end": 35825
} | class ____(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
self.steps_per_run = 5
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = training_util.get_or_create_global_step()
self.train_op = training_util._increment_global_step(self.steps_per_run)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=2*self.steps_per_run,
scaffold=self.scaffold)
hook._set_steps_per_run(self.steps_per_run)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=2*self.steps_per_run,
scaffold=self.scaffold)
hook._set_steps_per_run(self.steps_per_run)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
# Saved (step=5)
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved (step=10)
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Saved (step=15)
self.assertEqual(15,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved (step=20)
self.assertEqual(15,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Saved (step=25)
self.assertEqual(25,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_at_end(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=2*self.steps_per_run,
scaffold=self.scaffold)
hook._set_steps_per_run(self.steps_per_run)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
hook.end(sess)
self.assertEqual(10,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
| CheckpointSaverHookMultiStepTest |
python | keras-team__keras | keras/src/ops/math_test.py | {
"start": 38442,
"end": 39014
} | class ____(testing.TestCase):
def test_segment_sum_call(self):
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
segment_ids = np.array([0, 0, 1], dtype=np.int32)
num_segments = 2
sorted_segments = False
segment_sum_op = kmath.SegmentSum(
num_segments=num_segments, sorted=sorted_segments
)
output = segment_sum_op.call(data, segment_ids)
expected_output = np.array([[5, 7, 9], [7, 8, 9]], dtype=np.float32)
self.assertAllClose(output, expected_output)
| SegmentSumTest |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 6379,
"end": 11251
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
padding_mask: Optional[torch.Tensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
loss: Optional[torch.FloatTensor] = None
############ UTILS ################
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):
"""
Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that
stops at the corresponding element in `seq_lens`.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):
The sequences to mask, where `*` is any number of sequence-specific dimensions including none.
seq_lens (`torch.Tensor` of shape `(batch)`:
Each element represents the length of the sequence at the same index in `hidden_states`
Returns:
`torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`
"""
batch_size, mask_seq_len = hidden_states.shape[:2]
indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)
bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)
mask = hidden_states.new_ones((batch_size, mask_seq_len))
mask = mask.masked_fill(bool_mask, 0)
return mask
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.format_speech_generation_kwargs with SeamlessM4T->SeamlessM4Tv2
def format_speech_generation_kwargs(kwargs):
"""
Format kwargs for SeamlessM4Tv2 models that generate speech, attribute kwargs to either the text generation or the
speech generation models.
Args:
kwargs (`dict`)`:
Keyword arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
"""
# attribute kwargs to models
kwargs_text = {}
kwargs_speech = {}
for key, value in kwargs.items():
if key.startswith("text_"):
key = key[len("text_") :]
kwargs_text[key] = value
elif key.startswith("speech_"):
key = key[len("speech_") :]
kwargs_speech[key] = value
elif key == "generation_config":
kwargs_text[key] = value
else:
# If the key is already in a specific config, then it's been set with a
# submodules specific value and we don't override
if key not in kwargs_text:
kwargs_text[key] = value
if key not in kwargs_speech:
kwargs_speech[key] = value
return kwargs_text, kwargs_speech
############ SPEECH ENCODER related code ################
| SeamlessM4Tv2TextToUnitOutput |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 35502,
"end": 35922
} | class ____(ChainedAssetSelection):
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
return {
asset_key
for asset_key in self.child.resolve_inner(asset_graph, allow_missing=allow_missing)
if asset_key in asset_graph.materializable_asset_keys
}
@whitelist_for_serdes
@record
| MaterializableAssetSelection |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 1290,
"end": 2236
} | class ____:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close #nerf close ()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
| FakeSocket |
python | sympy__sympy | sympy/stats/matrix_distributions.py | {
"start": 14311,
"end": 17816
} | class ____(MatrixDistribution):
_argnames = ('location_matrix', 'scale_matrix_1', 'scale_matrix_2')
@staticmethod
def check(location_matrix, scale_matrix_1, scale_matrix_2):
if not isinstance(scale_matrix_1, MatrixSymbol):
_value_check(scale_matrix_1.is_positive_definite, "The shape "
"matrix must be positive definite.")
if not isinstance(scale_matrix_2, MatrixSymbol):
_value_check(scale_matrix_2.is_positive_definite, "The shape "
"matrix must be positive definite.")
_value_check(scale_matrix_1.is_square, "Scale matrix 1 should be "
"be square matrix")
_value_check(scale_matrix_2.is_square, "Scale matrix 2 should be "
"be square matrix")
n = location_matrix.shape[0]
p = location_matrix.shape[1]
_value_check(scale_matrix_1.shape[0] == n, "Scale matrix 1 should be"
" of shape %s x %s"% (str(n), str(n)))
_value_check(scale_matrix_2.shape[0] == p, "Scale matrix 2 should be"
" of shape %s x %s"% (str(p), str(p)))
@property
def set(self):
n, p = self.location_matrix.shape
return MatrixSet(n, p, S.Reals)
@property
def dimension(self):
return self.location_matrix.shape
def pdf(self, x):
M, U, V = self.location_matrix, self.scale_matrix_1, self.scale_matrix_2
n, p = M.shape
if isinstance(x, list):
x = ImmutableMatrix(x)
if not isinstance(x, (MatrixBase, MatrixSymbol)):
raise ValueError("%s should be an isinstance of Matrix "
"or MatrixSymbol" % str(x))
term1 = Inverse(V)*Transpose(x - M)*Inverse(U)*(x - M)
num = exp(-Trace(term1)/S(2))
den = (2*pi)**(S(n*p)/2) * Determinant(U)**(S(p)/2) * Determinant(V)**(S(n)/2)
return num/den
def MatrixNormal(symbol, location_matrix, scale_matrix_1, scale_matrix_2):
"""
Creates a random variable with Matrix Normal Distribution.
The density of the said distribution can be found at [1].
Parameters
==========
location_matrix: Real ``n x p`` matrix
Represents degrees of freedom
scale_matrix_1: Positive definite matrix
Scale Matrix of shape ``n x n``
scale_matrix_2: Positive definite matrix
Scale Matrix of shape ``p x p``
Returns
=======
RandomSymbol
Examples
========
>>> from sympy import MatrixSymbol
>>> from sympy.stats import density, MatrixNormal
>>> M = MatrixNormal('M', [[1, 2]], [1], [[1, 0], [0, 1]])
>>> X = MatrixSymbol('X', 1, 2)
>>> density(M)(X).doit()
exp(-Trace((Matrix([
[-1],
[-2]]) + X.T)*(Matrix([[-1, -2]]) + X))/2)/(2*pi)
>>> density(M)([[3, 4]]).doit()
exp(-4)/(2*pi)
References
==========
.. [1] https://en.wikipedia.org/wiki/Matrix_normal_distribution
"""
if isinstance(location_matrix, list):
location_matrix = ImmutableMatrix(location_matrix)
if isinstance(scale_matrix_1, list):
scale_matrix_1 = ImmutableMatrix(scale_matrix_1)
if isinstance(scale_matrix_2, list):
scale_matrix_2 = ImmutableMatrix(scale_matrix_2)
args = (location_matrix, scale_matrix_1, scale_matrix_2)
return rv(symbol, MatrixNormalDistribution, args)
#-------------------------------------------------------------------------------
# Matrix Student's T distribution ---------------------------------------------------
| MatrixNormalDistribution |
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py | {
"start": 46276,
"end": 48854
} | class ____(XLMRobertaXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| XLMRobertaXLForTokenClassification |
python | ray-project__ray | doc/source/ray-core/doc_code/direct_transport_gloo.py | {
"start": 5668,
"end": 7175
} | class ____:
@ray.method(tensor_transport="gloo")
def random_tensor(self):
self.tensor = torch.randn(1000, 1000)
# After this function returns, Ray and this actor will both hold a
# reference to the same tensor.
return self.tensor
def increment_and_sum_stored_tensor(self):
# NOTE: In-place update, while Ray still holds a reference to the same tensor.
self.tensor += 1
return torch.sum(self.tensor)
def increment_and_sum(self, tensor: torch.Tensor):
return torch.sum(tensor + 1)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
tensor = sender.random_tensor.remote()
tensor1 = sender.increment_and_sum_stored_tensor.remote()
# Wait for sender.increment_and_sum_stored_tensor task to finish.
tensor1 = ray.get(tensor1)
# Receiver will now receive the updated value instead of the original.
tensor2 = receiver.increment_and_sum.remote(tensor)
try:
# This assertion will fail because sender.increment_and_sum_stored_tensor
# modified the tensor in place before sending it to
# receiver.increment_and_sum.
assert torch.allclose(tensor1, ray.get(tensor2))
except AssertionError:
print("AssertionError: sender and receiver returned different sums.")
# __gloo_wait_tensor_freed_bad_end__
# __gloo_wait_tensor_freed_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
| MyActor |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation_py38.py | {
"start": 84,
"end": 170
} | class ____:
def __init__(self, first: Any, /, second: Any) -> None:
pass
| Egg |
python | dask__dask | dask/dataframe/dask_expr/_shuffle.py | {
"start": 23604,
"end": 24755
} | class ____(Expr):
_is_length_preserving = True
def _divisions(self):
if "user_divisions" in self._parameters and self.user_divisions is not None:
return self.user_divisions
if self._npartitions_input == 1:
return (None, None)
if (
is_index_like(self._divisions_column._meta)
and self._divisions_column.known_divisions
and self._divisions_column.npartitions == self.frame.npartitions
):
return self.other.divisions
divisions, mins, maxes, presorted = _get_divisions(
self.frame,
self._divisions_column,
self._npartitions_input,
self.ascending,
upsample=self.upsample,
)
if presorted and len(mins) == self._npartitions_input:
divisions = mins.copy() + [maxes[-1]]
return divisions
@property
def _npartitions_input(self):
return self.operand("npartitions") or self.frame.npartitions
@property
def npartitions(self):
return self.operand("npartitions") or len(self._divisions()) - 1
| BaseSetIndexSortValues |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_embed_image02.py | {
"start": 315,
"end": 911
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("embed_image02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.embed_image(0, 0, self.image_dir + "red.png")
worksheet.embed_image(8, 4, self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 2557,
"end": 3100
} | class ____(OrmClause[T], ABC):
"""Base class for path or query parameters with ORM transformation."""
def __init__(self, value: T | None = None, skip_none: bool = True) -> None:
super().__init__(value)
self.attribute: ColumnElement | InstrumentedAttribute | None = None
self.skip_none = skip_none
def set_value(self, value: T | None) -> Self:
self.value = value
return self
@classmethod
@abstractmethod
def depends(cls, *args: Any, **kwargs: Any) -> Self:
pass
| BaseParam |
python | realpython__materials | torchaudio/speech.py | {
"start": 571,
"end": 2517
} | class ____(NamedTuple):
waveform: Tensor
sample_rate: int
label: str
speaker_id: str
utterance_number: int
@property
def num_channels(self) -> int:
return self.waveform.size(0)
@property
def num_samples(self) -> int:
return self.waveform.size(1)
@property
def num_seconds(self) -> float:
return self.num_samples / self.sample_rate
def play(self) -> None:
sd.play(
self.waveform.numpy().reshape(-1, self.num_channels),
self.sample_rate,
blocking=True,
)
def play_widget(self) -> Audio:
return Audio(
self.waveform.numpy(), rate=self.sample_rate, autoplay=True
)
def save(self, path: str | Path) -> None:
torchaudio.save(path, self.waveform, self.sample_rate)
def apply(self, transform: Callable[[Tensor], Tensor]) -> Self:
return replace(self, waveform=transform(self.waveform))
def resample(self, sample_rate: int) -> Self:
return replace(
self,
sample_rate=sample_rate,
waveform=AF.resample(
self.waveform,
orig_freq=self.sample_rate,
new_freq=sample_rate,
),
)
def pad_trim(self, seconds: int | float) -> Self:
num_samples = int(self.sample_rate * seconds)
if self.num_samples > num_samples:
return replace(self, waveform=self.waveform[:, :num_samples])
elif self.num_samples < num_samples:
padding_amount = num_samples - self.num_samples
return replace(
self, waveform=F.pad(self.waveform, (0, padding_amount))
)
else:
return self
def with_gaussian_noise(self, level=0.01) -> Self:
noise = randn_like(self.waveform) * level
return replace(self, waveform=clamp(self.waveform + noise, -1.0, 1.0))
| SpeechSample |
python | getsentry__sentry | src/sentry/tempest/models.py | {
"start": 420,
"end": 566
} | class ____(models.TextChoices):
ERROR = "error"
WARNING = "warning"
SUCCESS = "success"
INFO = "info"
@region_silo_model
| MessageType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes5.py | {
"start": 5343,
"end": 5529
} | class ____(ParentClass3):
class Config1(ConfigBase): ...
# This should generate an error if reportIncompatibleVariableOverride
# is enabled.
class Config2: ...
| ChildClass3 |
python | kamyu104__LeetCode-Solutions | Python/find-the-kth-smallest-sum-of-a-matrix-with-sorted-rows.py | {
"start": 52,
"end": 966
} | class ____(object):
def kthSmallest(self, mat, k):
"""
:type mat: List[List[int]]
:type k: int
:rtype: int
"""
def kSmallestPairs(nums1, nums2, k):
result, min_heap = [], []
for c in xrange(min(len(nums1), k)):
heapq.heappush(min_heap, (nums1[c]+nums2[0], 0))
c += 1
while len(result) != k and min_heap:
total, c = heapq.heappop(min_heap)
result.append(total)
if c+1 == len(nums2):
continue
heapq.heappush(min_heap, (total-nums2[c]+nums2[c+1], c+1))
return result
result = mat[0]
for r in xrange(1, len(mat)):
result = kSmallestPairs(result, mat[r], k)
return result[k-1]
# Time: O((k + m) * log(m * MAX_NUM)) ~ O(k * m * log(m * MAX_NUM))
# Space: O(m)
| Solution |
python | doocs__leetcode | solution/0100-0199/0100.Same Tree/Solution2.py | {
"start": 192,
"end": 954
} | class ____:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if p == q:
return True
if p is None or q is None:
return False
q1, q2 = deque([p]), deque([q])
while q1 and q2:
a, b = q1.popleft(), q2.popleft()
if a.val != b.val:
return False
la, ra = a.left, a.right
lb, rb = b.left, b.right
if (la and not lb) or (lb and not la):
return False
if (ra and not rb) or (rb and not ra):
return False
if la:
q1.append(la)
q2.append(lb)
if ra:
q1.append(ra)
q2.append(rb)
return True
| Solution |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 21492,
"end": 21885
} | class ____:
save_steps: int = 20000
team_change: int = attr.ib()
@team_change.default
def _team_change_default(self):
# Assign team_change to about 4x save_steps
return self.save_steps * 5
swap_steps: int = 2000
window: int = 10
play_against_latest_model_ratio: float = 0.5
initial_elo: float = 1200.0
@attr.s(auto_attribs=True)
| SelfPlaySettings |
python | mlflow__mlflow | mlflow/store/model_registry/base_rest_store.py | {
"start": 191,
"end": 1341
} | class ____(AbstractStore):
"""
Base class client for a remote model registry server accessed via REST API calls
"""
__metaclass__ = ABCMeta
def __init__(self, get_host_creds):
super().__init__()
self.get_host_creds = get_host_creds
@abstractmethod
def _get_all_endpoints_from_method(self, method):
pass
@abstractmethod
def _get_endpoint_from_method(self, method):
pass
@abstractmethod
def _get_response_from_method(self, method):
pass
def _call_endpoint(self, api, json_body, call_all_endpoints=False, extra_headers=None):
response_proto = self._get_response_from_method(api)
if call_all_endpoints:
endpoints = self._get_all_endpoints_from_method(api)
return call_endpoints(
self.get_host_creds(), endpoints, json_body, response_proto, extra_headers
)
else:
endpoint, method = self._get_endpoint_from_method(api)
return call_endpoint(
self.get_host_creds(), endpoint, method, json_body, response_proto, extra_headers
)
| BaseRestStore |
python | pytest-dev__pytest | testing/test_skipping.py | {
"start": 23893,
"end": 27263
} | class ____:
def test_skip_class(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
class TestSomething(object):
def test_foo(self):
pass
def test_bar(self):
pass
def test_baz():
pass
"""
)
rec = pytester.inline_run()
rec.assertoutcome(skipped=2, passed=1)
def test_skips_on_false_string(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip('False')
def test_foo():
pass
"""
)
rec = pytester.inline_run()
rec.assertoutcome(skipped=1)
def test_arg_as_reason(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip('testing stuff')
def test_bar():
pass
"""
)
result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*testing stuff*", "*1 skipped*"])
def test_skip_no_reason(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
def test_foo():
pass
"""
)
result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"])
def test_skip_with_reason(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip(reason="for lolz")
def test_bar():
pass
"""
)
result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*for lolz*", "*1 skipped*"])
def test_only_skips_marked_test(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
def test_foo():
pass
@pytest.mark.skip(reason="nothing in particular")
def test_bar():
pass
def test_baz():
assert True
"""
)
result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*nothing in particular*", "*1 passed*2 skipped*"])
def test_strict_and_skip(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
def test_hello():
pass
"""
)
result = pytester.runpytest("-rs", "--strict-markers")
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"])
def test_wrong_skip_usage(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip(False, reason="I thought this was skipif")
def test_hello():
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*TypeError: *__init__() got multiple values for argument 'reason'"
" - maybe you meant pytest.mark.skipif?"
]
)
| TestSkip |
python | ray-project__ray | python/ray/tests/gcp/test_gcp_tpu_command_runner.py | {
"start": 504,
"end": 9501
} | class ____:
def __init__(self, num_workers: int = 1):
self.num_workers = num_workers
def get_internal_ip(self, worker_index: int) -> str:
return "0.0.0.0"
def get_external_ip(self, worker_index: int) -> str:
return "1.2.3.4"
def get(self, key) -> str:
if key == "name":
return _MOCK_TPU_NAME
elif key == "acceleratorType":
return _MOCK_ACCELERATOR_TYPE
return ""
def test_tpu_ssh_command_runner():
num_workers = 2
process_runner = MockProcessRunner()
provider = MockProvider()
instance = MockTpuInstance(num_workers=num_workers)
provider.create_node({}, {}, 1)
cluster_name = "cluster"
ssh_control_hash = hashlib.sha1(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.sha1(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(
ssh_user_hash[:10], ssh_control_hash[:10]
)
args = {
"instance": instance,
"log_prefix": "prefix",
"node_id": "abc",
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
}
env_vars = {"var1": 'quote between this " and this', "var2": "123"}
cmd_runner = TPUCommandRunner(**args)
cmd_runner.run(
"echo helloo", port_forward=[(8265, 8265)], environment_variables=env_vars
)
expected = [
"ssh",
"-tt",
"-L",
"8265:localhost:8265",
"-i",
"8265.pem",
"-o",
"StrictHostKeyChecking=no",
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
"IdentitiesOnly=yes",
"-o",
"ExitOnForwardFailure=yes",
"-o",
"ServerAliveInterval=5",
"-o",
"ServerAliveCountMax=3",
"-o",
"ControlMaster=auto",
"-o",
"ControlPath={}/%C".format(ssh_control_path),
"-o",
"ControlPersist=10s",
"-o",
"ConnectTimeout=120s",
"ray@1.2.3.4",
"bash",
"--login",
"-c",
"-i",
"""'source ~/.bashrc; export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1='"'"'"quote between this \\" and this"'"'"';export var2='"'"'"123"'"'"';echo helloo)'""", # noqa: E501
]
calls = process_runner.calls
# Asserts that we do make the call once per worker in the TPU pod.
assert len(process_runner.calls) == num_workers
# Much easier to debug this loop than the function call.
for i in range(num_workers):
for x, y in zip(calls[i], expected):
assert x == y
def test_tpu_docker_command_runner():
num_workers = 4
process_runner = MockProcessRunner()
provider = MockProvider()
instance = MockTpuInstance(num_workers=num_workers)
provider.create_node({}, {}, 1)
cluster_name = "cluster"
ssh_control_hash = hashlib.sha1(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.sha1(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(
ssh_user_hash[:10], ssh_control_hash[:10]
)
docker_config = {"container_name": "container"}
args = {
"instance": instance,
"log_prefix": "prefix",
"node_id": "0",
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = TPUCommandRunner(**args)
env_vars = {"var1": 'quote between this " and this', "var2": "123"}
cmd_runner.run("echo hello", environment_variables=env_vars)
# This string is insane because there are an absurd number of embedded
# quotes. While this is a ridiculous string, the escape behavior is
# important and somewhat difficult to get right for environment variables.
cmd = """'source ~/.bashrc; export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (docker exec -it container /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'source ~/.bashrc; export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1='"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"quote between this \\" and this"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"';export var2='"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"123"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"';echo hello)'"'"'"'"'"'"'"'"''"'"' )'""" # noqa: E501
expected = [
"ssh",
"-tt",
"-i",
"8265.pem",
"-o",
"StrictHostKeyChecking=no",
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
"IdentitiesOnly=yes",
"-o",
"ExitOnForwardFailure=yes",
"-o",
"ServerAliveInterval=5",
"-o",
"ServerAliveCountMax=3",
"-o",
"ControlMaster=auto",
"-o",
"ControlPath={}/%C".format(ssh_control_path),
"-o",
"ControlPersist=10s",
"-o",
"ConnectTimeout=120s",
"ray@1.2.3.4",
"bash",
"--login",
"-c",
"-i",
cmd,
]
calls = process_runner.calls
# Asserts that we do make the call once per worker in the TPU pod.
assert len(process_runner.calls) == num_workers
# Much easier to debug this loop than the function call.
for i in range(num_workers):
for x, y in zip(calls[i], expected):
assert x == y
def test_tpu_docker_run_init():
num_workers = 1
process_runner = MockProcessRunner()
provider = MockProvider()
instance = MockTpuInstance(num_workers=num_workers)
provider.create_node({}, {}, 1)
cluster_name = "cluster"
docker_config = {
"container_name": "container",
"image": "rayproject/ray:latest",
}
args = {
"instance": instance,
"log_prefix": "prefix",
"node_id": "0",
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = TPUCommandRunner(**args)
# Taken from tests/test_command_runner.py
# This mocks the response of 'docker inspect' command to return an empty JSON array.
# This simulates the scenario where the Docker image has no set environment
# variables, allowing us to test the subsequent code for handling this case.
process_runner.respond_to_call("json .Config.Env", 2 * ["[]"])
cmd_runner.run_init(as_head=True, file_mounts={}, sync_run_yet=True)
process_runner.assert_has_call("1.2.3.4", pattern="docker")
def test_max_active_connections_env_var():
num_workers = 2
process_runner = MockProcessRunner()
provider = MockProvider()
instance = MockTpuInstance(num_workers=num_workers)
provider.create_node({}, {}, 1)
cluster_name = "cluster"
docker_config = {"container_name": "container"}
args = {
"instance": instance,
"log_prefix": "prefix",
"node_id": "0",
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = TPUCommandRunner(**args)
os.environ[ray_constants.RAY_TPU_MAX_CONCURRENT_CONNECTIONS_ENV_VAR] = "1"
num_connections = cmd_runner.num_connections
assert type(num_connections) is int
assert num_connections == 1
def test_tpu_pod_resources():
    """Only worker 0 of a TPU pod should receive the per-pod head resource."""
    instance = MockTpuInstance(num_workers=2)
    provider = MockProvider()
    provider.create_node({}, {}, 1)
    runner_kwargs = {
        "instance": instance,
        "log_prefix": "prefix",
        "node_id": "abc",
        "provider": provider,
        "auth_config": auth_config,
        "cluster_name": "cluster",
        "process_runner": MockProcessRunner(),
        "use_internal_ip": False,
    }
    env_vars = {
        ray_constants.RESOURCES_ENVIRONMENT_VARIABLE: {
            "TPU": 4,
            f"TPU-{_MOCK_ACCELERATOR_TYPE}-head": 1,
        },
    }

    def fake_run(self, environment_variables, **kwargs):
        # Replacement for SSHCommandRunner.run: assert on the resources each
        # per-worker runner would export instead of actually running anything.
        resources = environment_variables[ray_constants.RESOURCES_ENVIRONMENT_VARIABLE]
        head_key = f"TPU-{_MOCK_ACCELERATOR_TYPE}-head"
        if self._worker_id == 0:
            assert head_key in resources
        else:
            assert head_key not in resources

    with patch.object(SSHCommandRunner, "run", new=fake_run):
        cmd_runner = TPUCommandRunner(**runner_kwargs)
        cmd_runner.run(
            "echo helloo", port_forward=[(8265, 8265)], environment_variables=env_vars
        )
if __name__ == "__main__":
    # Run this module's tests directly and propagate pytest's exit status.
    raise SystemExit(pytest.main(["-sv", __file__]))
| MockTpuInstance |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs.py | {
"start": 4937,
"end": 5122
} | class ____(graphene.Union):
class Meta:
types = (GrapheneRunGroup, GrapheneRunGroupNotFoundError, GraphenePythonError)
name = "RunGroupOrError"
| GrapheneRunGroupOrError |
python | getsentry__sentry | tests/snuba/tsdb/test_tsdb_backend.py | {
"start": 1937,
"end": 21611
} | class ____(TestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.db = SnubaTSDB()
self.now = before_now(hours=4).replace(hour=0, minute=0, second=0, microsecond=0)
self.proj1 = self.create_project()
env1 = "test"
env2 = "dev"
defaultenv = ""
release1 = "1" * 10
release2 = "2" * 10
self.release1 = Release.objects.create(
organization_id=self.organization.id, version=release1, date_added=self.now
)
self.release1.add_project(self.proj1)
self.release2 = Release.objects.create(
organization_id=self.organization.id, version=release2, date_added=self.now
)
self.release2.add_project(self.proj1)
for r in range(0, 14400, 600): # Every 10 min for 4 hours
self.store_event(
data={
"event_id": (str(r) * 32)[:32],
"message": "message 1",
"platform": "python",
"fingerprint": [["group-1"], ["group-2"]][
(r // 600) % 2
], # Switch every 10 mins
"timestamp": (self.now + timedelta(seconds=r)).isoformat(),
"tags": {
"foo": "bar",
"baz": "quux",
"region": ["US", "EU"][(r // 7200) % 3],
# Switch every 2 hours
"environment": [env1, None][(r // 7200) % 3],
"sentry:user": f"id:user{r // 3300}",
},
"user": {
# change every 55 min so some hours have 1 user, some have 2
"id": f"user{r // 3300}",
},
"release": str(r // 3600) * 10, # 1 per hour,
},
project_id=self.proj1.id,
)
groups = Group.objects.filter(project=self.proj1).order_by("id")
self.proj1group1 = groups[0]
self.proj1group2 = groups[1]
self.env1 = Environment.objects.get(name=env1)
self.env2 = self.create_environment(name=env2) # No events
self.defaultenv = Environment.objects.get(name=defaultenv)
self.group1release1env1 = GroupRelease.objects.get(
project_id=self.proj1.id,
group_id=self.proj1group1.id,
release_id=self.release1.id,
environment=env1,
)
self.group1release2env1 = GroupRelease.objects.create(
project_id=self.proj1.id,
group_id=self.proj1group1.id,
release_id=self.release2.id,
environment=env1,
)
self.group2release1env1 = GroupRelease.objects.get(
project_id=self.proj1.id,
group_id=self.proj1group2.id,
release_id=self.release1.id,
environment=env1,
)
def test_range_single(self) -> None:
env1 = "test"
project = self.create_project()
for r in range(0, 600 * 6 * 4, 300): # Every 10 min for 4 hours
self.store_event(
data={
"event_id": (str(r) * 32)[:32],
"message": "message 1",
"platform": "python",
"fingerprint": ["group-1"],
"timestamp": (self.now + timedelta(seconds=r)).isoformat(),
"tags": {
"foo": "bar",
"baz": "quux",
# Switch every 2 hours
"region": "US",
"environment": [env1, None][(r // 7200) % 3],
"sentry:user": f"id:user{r // 3300}",
},
"user": {
# change every 55 min so some hours have 1 user, some have 2
"id": f"user{r // 3300}",
},
"release": str(r // 3600) * 10, # 1 per hour,
},
project_id=project.id,
)
groups = Group.objects.filter(project=project).order_by("id")
group = groups[0]
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_range(
TSDBModel.group,
[group.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
group.id: [
(timestamp(dts[0]), 6 * 2),
(timestamp(dts[1]), 6 * 2),
(timestamp(dts[2]), 6 * 2),
(timestamp(dts[3]), 6 * 2),
]
}
def test_range_groups(self) -> None:
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_range(
TSDBModel.group,
[self.proj1group1.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1group1.id: [
(timestamp(dts[0]), 3),
(timestamp(dts[1]), 3),
(timestamp(dts[2]), 3),
(timestamp(dts[3]), 3),
]
}
# Multiple groups
assert self.db.get_range(
TSDBModel.group,
[self.proj1group1.id, self.proj1group2.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1group1.id: [
(timestamp(dts[0]), 3),
(timestamp(dts[1]), 3),
(timestamp(dts[2]), 3),
(timestamp(dts[3]), 3),
],
self.proj1group2.id: [
(timestamp(dts[0]), 3),
(timestamp(dts[1]), 3),
(timestamp(dts[2]), 3),
(timestamp(dts[3]), 3),
],
}
assert (
self.db.get_range(
TSDBModel.group,
[],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "test", "organization_id": 1},
)
== {}
)
def test_range_releases(self) -> None:
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_range(
TSDBModel.release,
[self.release1.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.release1.id: [
(timestamp(dts[0]), 0),
(timestamp(dts[1]), 6),
(timestamp(dts[2]), 0),
(timestamp(dts[3]), 0),
]
}
def test_range_project(self) -> None:
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_range(
TSDBModel.project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1.id: [
(timestamp(dts[0]), 6),
(timestamp(dts[1]), 6),
(timestamp(dts[2]), 6),
(timestamp(dts[3]), 6),
]
}
def test_range_environment_filter(self) -> None:
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_range(
TSDBModel.project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=3600,
environment_ids=[self.env1.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1.id: [
(timestamp(dts[0]), 6),
(timestamp(dts[1]), 6),
(timestamp(dts[2]), 0),
(timestamp(dts[3]), 0),
]
}
# No events submitted for env2
assert self.db.get_range(
TSDBModel.project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=3600,
environment_ids=[self.env2.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1.id: [
(timestamp(dts[0]), 0),
(timestamp(dts[1]), 0),
(timestamp(dts[2]), 0),
(timestamp(dts[3]), 0),
]
}
# Events submitted with no environment should match default environment
assert self.db.get_range(
TSDBModel.project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=3600,
environment_ids=[self.defaultenv.id],
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1.id: [
(timestamp(dts[0]), 0),
(timestamp(dts[1]), 0),
(timestamp(dts[2]), 6),
(timestamp(dts[3]), 6),
]
}
def test_range_rollups(self) -> None:
# Daily
daystart = self.now.replace(hour=0) # day buckets start on day boundaries
dts = [daystart + timedelta(days=i) for i in range(2)]
assert self.db.get_range(
TSDBModel.project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=86400,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {self.proj1.id: [(timestamp(dts[0]), 24), (timestamp(dts[1]), 0)]}
# Minutely
dts = [self.now + timedelta(minutes=i) for i in range(120)]
# Expect every 10th minute to have a 1, else 0
expected = [(d.timestamp(), 1 if i % 10 == 0 else 0) for i, d in enumerate(dts)]
assert self.db.get_range(
TSDBModel.project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=60,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {self.proj1.id: expected}
def test_distinct_counts_series_users(self) -> None:
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_distinct_counts_series(
TSDBModel.users_affected_by_group,
[self.proj1group1.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1group1.id: [
(timestamp(dts[0]), 1),
(timestamp(dts[1]), 1),
(timestamp(dts[2]), 1),
(timestamp(dts[3]), 2),
]
}
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_distinct_counts_series(
TSDBModel.users_affected_by_project,
[self.proj1.id],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1.id: [
(timestamp(dts[0]), 1),
(timestamp(dts[1]), 2),
(timestamp(dts[2]), 2),
(timestamp(dts[3]), 2),
]
}
assert (
self.db.get_distinct_counts_series(
TSDBModel.users_affected_by_group,
[],
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
== {}
)
def test_get_distinct_counts_totals_users(self) -> None:
assert self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[self.proj1group1.id],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1group1.id: 5 # 5 unique users overall
}
assert self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[self.proj1group1.id],
self.now,
self.now,
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1group1.id: 1 # Only 1 unique user in the first hour
}
assert self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_project,
[self.proj1.id],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {self.proj1.id: 5}
assert (
self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
== {}
)
def test_get_distinct_counts_totals_users__with_conditions(self) -> None:
assert self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[self.proj1group1.id],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
conditions=[("tags[region]", "=", "US")],
) == {
self.proj1group1.id: 2 # 5 unique users with US tag
}
assert self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[self.proj1group1.id],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
conditions=[("tags[region]", "=", "EU")],
) == {
self.proj1group1.id: 3 # 3 unique users with EU tag
}
assert self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[self.proj1group1.id],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
conditions=[("tags[region]", "=", "MARS")],
) == {self.proj1group1.id: 0}
assert (
self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_group,
[],
self.now,
self.now + timedelta(hours=4),
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
== {}
)
def test_frequency_series(self) -> None:
dts = [self.now + timedelta(hours=i) for i in range(4)]
assert self.db.get_frequency_series(
TSDBModel.frequent_releases_by_group,
{
self.proj1group1.id: (self.group1release1env1.id, self.group1release2env1.id),
self.proj1group2.id: (self.group2release1env1.id,),
},
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
) == {
self.proj1group1.id: [
(timestamp(dts[0]), {self.group1release1env1.id: 0, self.group1release2env1.id: 0}),
(timestamp(dts[1]), {self.group1release1env1.id: 3, self.group1release2env1.id: 0}),
(timestamp(dts[2]), {self.group1release1env1.id: 0, self.group1release2env1.id: 3}),
(timestamp(dts[3]), {self.group1release1env1.id: 0, self.group1release2env1.id: 0}),
],
self.proj1group2.id: [
(timestamp(dts[0]), {self.group2release1env1.id: 0}),
(timestamp(dts[1]), {self.group2release1env1.id: 3}),
(timestamp(dts[2]), {self.group2release1env1.id: 0}),
(timestamp(dts[3]), {self.group2release1env1.id: 0}),
],
}
assert (
self.db.get_frequency_series(
TSDBModel.frequent_releases_by_group,
{},
dts[0],
dts[-1],
rollup=3600,
tenant_ids={"referrer": "r", "organization_id": 1234},
)
== {}
)
def test_result_shape(self) -> None:
"""
Tests that the results from the different TSDB methods have the
expected format.
"""
project_id = self.proj1.id
dts = [self.now + timedelta(hours=i) for i in range(4)]
items = {
# {project_id: (issue_id, issue_id, ...)}
project_id: (self.proj1group1.id, self.proj1group2.id)
}
results1 = self.db.get_frequency_series(
TSDBModel.frequent_issues_by_project,
items,
dts[0],
dts[-1],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert has_shape(results1, {1: [(1, {1: 1})]})
results2 = self.db.get_range(
TSDBModel.project,
[project_id],
dts[0],
dts[-1],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert has_shape(results2, {1: [(1, 1)]})
results3 = self.db.get_distinct_counts_series(
TSDBModel.users_affected_by_project,
[project_id],
dts[0],
dts[-1],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert has_shape(results3, {1: [(1, 1)]})
results4 = self.db.get_distinct_counts_totals(
TSDBModel.users_affected_by_project,
[project_id],
dts[0],
dts[-1],
tenant_ids={"referrer": "r", "organization_id": 1234},
)
assert has_shape(results4, {1: 1})
def test_calculated_limit(self) -> None:
with patch("sentry.tsdb.snuba.raw_snql_query") as snuba:
# 24h test
rollup = 3600
end = self.now
start = end + timedelta(days=-1, seconds=rollup)
self.db.get_data(TSDBModel.group, [1, 2, 3, 4, 5], start, end, rollup=rollup)
assert snuba.call_args.args[0].query.limit == Limit(120)
# 14 day test
rollup = 86400
start = end + timedelta(days=-14, seconds=rollup)
self.db.get_data(TSDBModel.group, [1, 2, 3, 4, 5], start, end, rollup=rollup)
assert snuba.call_args.args[0].query.limit == Limit(70)
# 1h test
rollup = 3600
end = self.now
start = end + timedelta(hours=-1, seconds=rollup)
self.db.get_data(TSDBModel.group, [1, 2, 3, 4, 5], start, end, rollup=rollup)
assert snuba.call_args.args[0].query.limit == Limit(5)
@patch("sentry.utils.snuba.OVERRIDE_OPTIONS", new={"consistent": True})
def test_tsdb_with_consistent(self) -> None:
with patch("sentry.utils.snuba._apply_cache_and_build_results") as snuba:
rollup = 3600
end = self.now
start = end + timedelta(days=-1, seconds=rollup)
self.db.get_data(TSDBModel.group, [1, 2, 3, 4, 5], start, end, rollup=rollup)
assert snuba.call_args.args[0][0].request.query.limit == Limit(120)
assert snuba.call_args.args[0][0].request.flags.consistent is True
| SnubaTSDBTest |
python | huggingface__transformers | src/transformers/models/modernbert/modeling_modernbert.py | {
"start": 55895,
"end": 60067
} | class ____(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
sliding_window_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
indices: Optional[torch.Tensor] = None,
cu_seqlens: Optional[torch.Tensor] = None,
max_seqlen: Optional[int] = None,
batch_size: Optional[int] = None,
seq_len: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
r"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
sliding_window_mask=sliding_window_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen,
batch_size=batch_size,
seq_len=seq_len,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = outputs[0]
last_hidden_state = self.head(last_hidden_state)
last_hidden_state = self.drop(last_hidden_state)
logits = self.classifier(last_hidden_state)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| ModernBertForTokenClassification |
python | scrapy__scrapy | tests/test_exporters.py | {
"start": 904,
"end": 1025
} | class ____:
name: str
age: int = dataclasses.field(metadata={"serializer": custom_serializer})
| CustomFieldDataclass |
python | redis__redis-py | tests/test_maint_notifications_handling.py | {
"start": 6862,
"end": 15833
} | class ____:
"""Mock socket that simulates Redis protocol responses."""
def __init__(self):
self.connected = False
self.address = None
self.sent_data = []
self.closed = False
self.command_count = 0
self.pending_responses = []
# Track socket timeout changes for maintenance notifications validation
self.timeout = None
self.thread_timeouts = {} # Track last applied timeout per thread
self.moving_sent = False
def connect(self, address):
"""Simulate socket connection."""
self.connected = True
self.address = address
def send(self, data):
"""Simulate sending data to Redis."""
if self.closed:
raise ConnectionError("Socket is closed")
self.sent_data.append(data)
# Analyze the command and prepare appropriate response
if b"HELLO" in data:
response = b"%7\r\n$6\r\nserver\r\n$5\r\nredis\r\n$7\r\nversion\r\n$5\r\n7.4.0\r\n$5\r\nproto\r\n:3\r\n$2\r\nid\r\n:1\r\n$4\r\nmode\r\n$10\r\nstandalone\r\n$4\r\nrole\r\n$6\r\nmaster\r\n$7\r\nmodules\r\n*0\r\n"
self.pending_responses.append(response)
elif b"MAINT_NOTIFICATIONS" in data and b"internal-ip" in data:
# Simulate error response - activate it only for internal-ip tests
response = b"+ERROR\r\n"
self.pending_responses.append(response)
elif b"SET" in data:
response = b"+OK\r\n"
# Check if this is a key that should trigger a push message
if b"key_receive_migrating_" in data or b"key_receive_migrating" in data:
# MIGRATING push message before SET key_receive_migrating_X response
# Format: >3\r\n$9\r\nMIGRATING\r\n:1\r\n:10\r\n (3 elements: MIGRATING, id, ttl)
migrating_push = ">3\r\n$9\r\nMIGRATING\r\n:1\r\n:10\r\n"
response = migrating_push.encode() + response
elif b"key_receive_migrated_" in data or b"key_receive_migrated" in data:
# MIGRATED push message before SET key_receive_migrated_X response
# Format: >2\r\n$8\r\nMIGRATED\r\n:1\r\n (2 elements: MIGRATED, id)
migrated_push = ">2\r\n$8\r\nMIGRATED\r\n:1\r\n"
response = migrated_push.encode() + response
elif (
b"key_receive_failing_over_" in data
or b"key_receive_failing_over" in data
):
# FAILING_OVER push message before SET key_receive_failing_over_X response
# Format: >3\r\n$12\r\nFAILING_OVER\r\n:1\r\n:10\r\n (3 elements: FAILING_OVER, id, ttl)
failing_over_push = ">3\r\n$12\r\nFAILING_OVER\r\n:1\r\n:10\r\n"
response = failing_over_push.encode() + response
elif (
b"key_receive_failed_over_" in data
or b"key_receive_failed_over" in data
):
# FAILED_OVER push message before SET key_receive_failed_over_X response
# Format: >2\r\n$11\r\nFAILED_OVER\r\n:1\r\n (2 elements: FAILED_OVER, id)
failed_over_push = ">2\r\n$11\r\nFAILED_OVER\r\n:1\r\n"
response = failed_over_push.encode() + response
elif b"key_receive_moving_none_" in data:
# MOVING push message before SET key_receive_moving_none_X response
# Format: >4\r\n$6\r\nMOVING\r\n:1\r\n:1\r\n+null\r\n (4 elements: MOVING, id, ttl, null)
# Note: Using + instead of $ to send as simple string instead of bulk string
moving_push = f">4\r\n$6\r\nMOVING\r\n:1\r\n:{MOVING_TIMEOUT}\r\n_\r\n"
response = moving_push.encode() + response
elif b"key_receive_moving_" in data:
# MOVING push message before SET key_receive_moving_X response
# Format: >4\r\n$6\r\nMOVING\r\n:1\r\n:1\r\n+1.2.3.4:6379\r\n (4 elements: MOVING, id, ttl, host:port)
# Note: Using + instead of $ to send as simple string instead of bulk string
moving_push = f">4\r\n$6\r\nMOVING\r\n:1\r\n:{MOVING_TIMEOUT}\r\n+{AFTER_MOVING_ADDRESS}\r\n"
response = moving_push.encode() + response
self.pending_responses.append(response)
elif b"GET" in data:
# Extract key and provide appropriate response
if b"hello" in data:
response = b"$5\r\nworld\r\n"
self.pending_responses.append(response)
# Handle specific keys used in tests
elif b"key_receive_moving_0" in data:
self.pending_responses.append(b"$8\r\nvalue3_0\r\n")
elif b"key_receive_migrated_0" in data:
self.pending_responses.append(b"$13\r\nmigrated_value\r\n")
elif b"key_receive_migrating" in data:
self.pending_responses.append(b"$6\r\nvalue2\r\n")
elif b"key_receive_migrated" in data:
self.pending_responses.append(b"$6\r\nvalue3\r\n")
elif b"key_receive_failing_over" in data:
self.pending_responses.append(b"$6\r\nvalue4\r\n")
elif b"key_receive_failed_over" in data:
self.pending_responses.append(b"$6\r\nvalue5\r\n")
elif b"key1" in data:
self.pending_responses.append(b"$6\r\nvalue1\r\n")
else:
self.pending_responses.append(b"$-1\r\n") # NULL response
else:
self.pending_responses.append(b"+OK\r\n") # Default response
self.command_count += 1
return len(data)
def sendall(self, data):
"""Simulate sending all data to Redis."""
return self.send(data)
def recv(self, bufsize):
"""Simulate receiving data from Redis."""
if self.closed:
raise ConnectionError("Socket is closed")
# Use pending responses that were prepared when commands were sent
if self.pending_responses:
response = self.pending_responses.pop(0)
if b"MOVING" in response:
self.moving_sent = True
return response[:bufsize] # Respect buffer size
else:
# No data available - this should block or raise an exception
# For can_read checks, we should indicate no data is available
import errno
raise BlockingIOError(errno.EAGAIN, "Resource temporarily unavailable")
def recv_into(self, buffer, nbytes=0):
"""
Receive data from Redis and write it into the provided buffer.
Returns the number of bytes written.
This method is used by the hiredis parser for efficient data reading.
"""
if self.closed:
raise ConnectionError("Socket is closed")
# Use pending responses that were prepared when commands were sent
if self.pending_responses:
response = self.pending_responses.pop(0)
if b"MOVING" in response:
self.moving_sent = True
# Determine how many bytes to write
if nbytes == 0:
nbytes = len(buffer)
# Write data into the buffer (up to nbytes or response length)
bytes_to_write = min(len(response), nbytes, len(buffer))
buffer[:bytes_to_write] = response[:bytes_to_write]
return bytes_to_write
else:
# No data available - this should block or raise an exception
# For can_read checks, we should indicate no data is available
import errno
raise BlockingIOError(errno.EAGAIN, "Resource temporarily unavailable")
def fileno(self):
"""Return a fake file descriptor for select/poll operations."""
return 1 # Fake file descriptor
def close(self):
"""Simulate closing the socket."""
self.closed = True
self.connected = False
self.address = None
self.timeout = None
self.thread_timeouts = {}
def settimeout(self, timeout):
"""Simulate setting socket timeout and track changes per thread."""
self.timeout = timeout
# Track last applied timeout with thread_id information added
thread_id = threading.current_thread().ident
self.thread_timeouts[thread_id] = timeout
def gettimeout(self):
"""Simulate getting socket timeout."""
return self.timeout
def setsockopt(self, level, optname, value):
"""Simulate setting socket options."""
pass
def setblocking(self, blocking):
pass
def getpeername(self):
"""Simulate getting peer name."""
return self.address
def getsockname(self):
"""Simulate getting socket name."""
return (self.address.split(":")[0], 12345)
def shutdown(self, how):
"""Simulate socket shutdown."""
pass
| MockSocket |
python | getsentry__sentry | src/sentry/conf/types/logging_config.py | {
"start": 170,
"end": 258
} | class ____(_DictConfigArgs):
default_level: str
overridable: list[str]
| LoggingConfig |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/progress.py | {
"start": 2146,
"end": 3008
} | class ____(_ReadyCompletedTracker):
"""Track an event's progress.
Args:
ready: Intended to track the number of events ready to start.
started: Intended to be incremented after the event is started (e.g. after ``on_*_start`` runs).
completed: Intended to be incremented after the event completes (e.g. after ``on_*_end`` runs).
These attributes should be increased in order, that is, :attr:`ready` first and :attr:`completed` last.
"""
started: int = 0
@override
def reset(self) -> None:
super().reset()
self.started = 0
@override
def reset_on_restart(self) -> None:
super().reset_on_restart()
self.started = self.completed
@override
def increment_by(self, n: int) -> None:
super().increment_by(n)
self.started += n
@dataclass
| _StartedTracker |
python | PrefectHQ__prefect | tests/test_flow_engine.py | {
"start": 2624,
"end": 3570
} | class ____:
def test_basic_init(self):
engine = AsyncFlowRunEngine(flow=foo)
assert isinstance(engine.flow, Flow)
assert engine.flow.name == "foo"
assert engine.parameters == {}
def test_empty_init(self):
with pytest.raises(
TypeError, match="missing 1 required positional argument: 'flow'"
):
AsyncFlowRunEngine()
def test_client_attr_raises_informative_error(self):
engine = AsyncFlowRunEngine(flow=foo)
with pytest.raises(RuntimeError, match="not started"):
engine.client
async def test_client_attr_returns_client_after_starting(self):
engine = AsyncFlowRunEngine(flow=foo)
async with engine.initialize_run():
client = engine.client
assert isinstance(client, PrefectClient)
with pytest.raises(RuntimeError, match="not started"):
engine.client
| TestAsyncFlowRunEngine |
python | ray-project__ray | python/ray/runtime_env/types/pip.py | {
"start": 71,
"end": 134
} | class ____:
packages: List[str]
pip_check: bool = False
| Pip |
python | jazzband__django-waffle | waffle/migrations/0002_auto_20161201_0958.py | {
"start": 92,
"end": 413
} | class ____(migrations.Migration):
dependencies = [
('waffle', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='switch',
name='active',
field=models.BooleanField(default=False, help_text='Is this switch active?'),
),
]
| Migration |
python | donnemartin__interactive-coding-challenges | arrays_strings/priority_queue/priority_queue.py | {
"start": 206,
"end": 932
} | class ____(object):
def __init__(self):
self.array = []
def __len__(self):
return len(self.array)
def insert(self, node):
self.array.append(node)
return self.array[-1]
def extract_min(self):
if not self.array:
return None
minimum = sys.maxsize
for index, node in enumerate(self.array):
if node.key < minimum:
minimum = node.key
minimum_index = index
return self.array.pop(minimum_index)
def decrease_key(self, obj, new_key):
for node in self.array:
if node.obj is obj:
node.key = new_key
return node
return None
| PriorityQueue |
python | wandb__wandb | wandb/sdk/launch/inputs/internal.py | {
"start": 2089,
"end": 9807
} | class ____:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self) -> None:
if not hasattr(self, "_staged_inputs"):
self._staged_inputs: List[JobInputArguments] = []
def add_staged_input(
self,
input_arguments: JobInputArguments,
):
self._staged_inputs.append(input_arguments)
def apply(self, run: wandb.Run):
"""Apply the staged inputs to the given run."""
for input in self._staged_inputs:
_publish_job_input(input, run)
def _publish_job_input(
input: JobInputArguments,
run: wandb.Run,
) -> None:
"""Publish a job input to the backend interface of the given run.
Arguments:
input (JobInputArguments): The arguments for the job input.
run (wandb.Run): The run to publish the job input to.
"""
assert run._backend is not None
assert run._backend.interface is not None
assert input.run_config is not None
interface = run._backend.interface
if input.file_path:
config_dir = ConfigTmpDir()
dest = os.path.join(config_dir.configs_dir, input.file_path)
run.save(dest, base_path=config_dir.tmp_dir)
interface.publish_job_input(
include_paths=[_split_on_unesc_dot(path) for path in input.include]
if input.include
else [],
exclude_paths=[_split_on_unesc_dot(path) for path in input.exclude]
if input.exclude
else [],
input_schema=input.schema,
run_config=input.run_config,
file_path=input.file_path or "",
)
def _replace_refs_and_allofs(schema: dict, defs: Optional[dict]) -> dict:
"""Recursively fix JSON schemas with common issues.
1. Replaces any instances of $ref with their associated definition in defs
2. Removes any "allOf" lists that only have one item, "lifting" the item up
See test_internal.py for examples
"""
ret: Dict[str, Any] = {}
if "$ref" in schema and defs:
# Reference found, replace it with its definition
def_key = schema.pop("$ref").split("#/$defs/")[1]
# Also run recursive replacement in case a ref contains more refs
ret = _replace_refs_and_allofs(defs[def_key], defs)
for key, val in schema.items():
if isinstance(val, dict):
# Step into dicts recursively
new_val_dict = _replace_refs_and_allofs(val, defs)
ret[key] = new_val_dict
elif isinstance(val, list):
# Step into each item in the list
new_val_list = []
for item in val:
if isinstance(item, dict):
new_val_list.append(_replace_refs_and_allofs(item, defs))
else:
new_val_list.append(item)
# Lift up allOf blocks with only one item
if (
key == "allOf"
and len(new_val_list) == 1
and isinstance(new_val_list[0], dict)
):
ret.update(new_val_list[0])
else:
ret[key] = new_val_list
else:
# For anything else (str, int, etc) keep it as-is
ret[key] = val
return ret
def _prepare_schema(schema: Any) -> dict:
"""Prepare a schema for validation.
This function prepares a schema for validation by:
1. Converting a Pydantic model instance or class to a dict
2. Replacing $ref with their associated definition in defs
3. Removing any "allOf" lists that only have one item, "lifting" the item up
We support both an instance of a pydantic BaseModel class (e.g. schema=MySchema(...))
or the BaseModel class itself (e.g. schema=MySchema)
"""
if hasattr(schema, "model_json_schema") and callable(
schema.model_json_schema # type: ignore
):
schema = schema.model_json_schema()
if not isinstance(schema, dict):
raise LaunchError(
"schema must be a dict, Pydantic model instance, or Pydantic model class."
)
defs = schema.pop("$defs", None)
return _replace_refs_and_allofs(schema, defs)
def _validate_schema(schema: dict) -> None:
jsonschema = get_module(
"jsonschema",
required="Setting job schema requires the jsonschema package. Please install it with `pip install 'wandb[launch]'`.",
lazy=False,
)
validator = jsonschema.Draft202012Validator(META_SCHEMA)
errs = sorted(validator.iter_errors(schema), key=str)
if errs:
wandb.termwarn(f"Schema includes unhandled or invalid configurations:\n{errs}")
def handle_config_file_input(
path: str,
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
schema: Optional[Any] = None,
):
"""Declare an overridable configuration file for a launch job.
The configuration file is copied to a temporary directory and the path to
the copy is sent to the backend interface of the active run and used to
configure the job builder.
If there is no active run, the configuration file is staged and sent when a
run is created.
"""
config_path_is_valid(path)
override_file(path)
tmp_dir = ConfigTmpDir()
dest = os.path.join(tmp_dir.configs_dir, path)
dest_dir = os.path.dirname(dest)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(
path,
dest,
)
if schema:
schema = _prepare_schema(schema)
_validate_schema(schema)
arguments = JobInputArguments(
include=include,
exclude=exclude,
schema=schema,
file_path=path,
run_config=False,
)
if wandb.run is not None:
_publish_job_input(arguments, wandb.run)
else:
staged_inputs = StagedLaunchInputs()
staged_inputs.add_staged_input(arguments)
def handle_run_config_input(
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
schema: Optional[Any] = None,
):
"""Declare wandb.config as an overridable configuration for a launch job.
The include and exclude paths are sent to the backend interface of the
active run and used to configure the job builder.
If there is no active run, the include and exclude paths are staged and sent
when a run is created.
"""
if schema:
schema = _prepare_schema(schema)
_validate_schema(schema)
arguments = JobInputArguments(
include=include,
exclude=exclude,
schema=schema,
run_config=True,
file_path=None,
)
if wandb.run is not None:
_publish_job_input(arguments, wandb.run)
else:
stage_inputs = StagedLaunchInputs()
stage_inputs.add_staged_input(arguments)
def _split_on_unesc_dot(path: str) -> List[str]:
r"""Split a string on unescaped dots.
Arguments:
path (str): The string to split.
Raises:
ValueError: If the path has a trailing escape character.
Returns:
List[str]: The split string.
"""
parts = []
part = ""
i = 0
while i < len(path):
if path[i] == BACKSLASH:
if i == len(path) - 1:
raise LaunchError(
f"Invalid config path {path}: trailing {BACKSLASH}.",
)
if path[i + 1] == PERIOD:
part += PERIOD
i += 2
elif path[i] == PERIOD:
parts.append(part)
part = ""
i += 1
else:
part += path[i]
i += 1
if part:
parts.append(part)
return parts
| StagedLaunchInputs |
python | PrefectHQ__prefect | src/prefect/blocks/notifications.py | {
"start": 2482,
"end": 3908
} | class ____(AbstractAppriseNotificationBlock, ABC):
"""
A base class for sending notifications using Apprise, through webhook URLs.
"""
_documentation_url = HttpUrl(
"https://docs.prefect.io/latest/automate/events/automations-triggers#sending-notifications-with-automations"
)
url: SecretStr = Field(
default=...,
title="Webhook URL",
description="Incoming webhook URL used to send notifications.",
examples=["https://hooks.example.com/XXX"],
)
allow_private_urls: bool = Field(
default=True,
description="Whether to allow notifications to private URLs. Defaults to True.",
)
@sync_compatible
async def notify( # pyright: ignore[reportIncompatibleMethodOverride] TODO: update to sync only once base class is updated
self,
body: str,
subject: str | None = None,
):
if not self.allow_private_urls:
try:
validate_restricted_url(self.url.get_secret_value())
except ValueError as exc:
if self._raise_on_failure:
raise NotificationError(str(exc))
raise
await super().notify(body, subject) # pyright: ignore[reportGeneralTypeIssues] TODO: update to sync only once base class is updated
# TODO: Move to prefect-slack once collection block auto-registration is
# available
| AppriseNotificationBlock |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 14628,
"end": 16573
} | class ____(_Node):
"""Specialization of _Node to ResourceGather."""
def convert_variable_to_constant(self, incoming_edge, tensor_data):
# We currently skip the conversion if this is inside a function.
if self._function is not None:
return
if self._node.attr["batch_dims"].i != 0:
raise ValueError("batch_dims must be 0 for freeze_graph, but got "
f"node({self._node.name}).attr('batch_dims') = "
f"{self._node.attr['batch_dims'].i}.")
axis_node_name = self._node.name + "/axis"
axis_dtype = self._node.attr["Tindices"]
axis_data = np.array(self._node.attr["batch_dims"].i)
converted_graph = self._enclosing_graph.converted_self()
# Add Const axis node, or get it if it exists to avoid duplicates.
if axis_node_name not in converted_graph.nodes:
converted_graph.nodes[axis_node_name] = _Node.new(
node=converted_graph.graph_def.node.add(),
function=self._function,
enclosing_graph=converted_graph)
output_axis_node = converted_graph.nodes[axis_node_name].node
output_axis_node.name = axis_node_name
output_axis_node.op = "Const"
output_axis_node.attr["dtype"].CopyFrom(axis_dtype)
tensor = tensor_util.make_tensor_proto(
axis_data, dtype=axis_dtype.type, shape=axis_data.shape)
output_axis_node.attr["value"].tensor.CopyFrom(tensor)
output_node = self.converted_self().node
output_node.Clear()
output_node.name = self._node.name
output_node.op = "GatherV2"
output_node.input.extend(
[self._node.input[0], self._node.input[1], axis_node_name])
output_node.attr["Tparams"].CopyFrom(self._node.attr["dtype"])
output_node.attr["Tindices"].CopyFrom(self._node.attr["Tindices"])
output_node.attr["Taxis"].CopyFrom(axis_dtype)
if "_class" in self._node.attr:
output_node.attr["_class"].CopyFrom(self._node.attr["_class"])
| _ResourceGather |
python | ipython__ipython | tests/test_interactiveshell.py | {
"start": 22919,
"end": 24395
} | class ____(ExitCodeChecks):
def setUp(self):
super().setUp()
self.system = ip.system_raw
@onlyif_unicode_paths
def test_1(self):
"""Test system_raw with non-ascii cmd"""
cmd = """python -c "'åäö'" """
ip.system_raw(cmd)
@mock.patch("subprocess.call", side_effect=KeyboardInterrupt)
@mock.patch("os.system", side_effect=KeyboardInterrupt)
def test_control_c(self, *mocks):
try:
self.system("sleep 1 # won't happen")
except KeyboardInterrupt: # pragma: no cove
self.fail(
"system call should intercept "
"keyboard interrupt from subprocess.call"
)
self.assertEqual(ip.user_ns["_exit_code"], -signal.SIGINT)
@pytest.mark.parametrize("magic_cmd", ["pip", "conda", "cd"])
def test_magic_warnings(magic_cmd):
if sys.platform == "win32":
to_mock = "os.system"
expected_arg, expected_kwargs = magic_cmd, dict()
else:
to_mock = "subprocess.call"
expected_arg, expected_kwargs = magic_cmd, dict(
shell=True, executable=os.environ.get("SHELL", None)
)
with mock.patch(to_mock, return_value=0) as mock_sub:
with pytest.warns(Warning, match=r"You executed the system command"):
ip.system_raw(magic_cmd)
mock_sub.assert_called_once_with(expected_arg, **expected_kwargs)
# TODO: Exit codes are currently ignored on Windows.
| TestSystemRaw |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 157485,
"end": 163852
} | class ____(Qwen2_5OmniPreTrainedModel):
config: Qwen2_5OmniDiTConfig
input_modalities = "audio"
_no_split_modules = ["DiTDecoderLayer"]
def __init__(self, config: Qwen2_5OmniDiTConfig):
super().__init__(config)
self.mel_dim = config.mel_dim
self.repeats = config.repeats
self.time_embed = DiTTimestepEmbedding(config.hidden_size)
self.text_embed = DiTCodecEmbedding(config.num_embeds, config.emb_dim, config.repeats)
self.input_embed = DiTInputEmbedding(config)
self.rotary_embed = Qwen2_5OmniDiTRotaryEmbedding(config=config)
self.hidden_size = config.hidden_size
self.layers = config.num_hidden_layers
self.block_size = config.block_size
self.num_attention_heads = config.num_attention_heads
self.transformer_blocks = nn.ModuleList()
for i in range(config.num_hidden_layers):
self.transformer_blocks.append(
DiTDecoderLayer(
config,
look_ahead_block=1 if i in config.look_ahead_layers else 0,
look_backward_block=1 if i in config.look_backward_layers else 0,
)
)
self.norm_out = Qwen2_5_OmniAdaLayerNormZero_Final(config.hidden_size) # final modulation
self.proj_out = nn.Linear(config.hidden_size, config.mel_dim)
def _create_block_diff(self, hidden_states):
batch, seq_len = hidden_states.shape[0], hidden_states.shape[1]
block_indices = torch.arange(seq_len, device=hidden_states.device) // self.block_size # [seq_length]
block_i = block_indices.unsqueeze(1) # [seq_length, 1]
block_j = block_indices.unsqueeze(0) # [1, seq_length]
block_diff = block_j - block_i # (n, n)
return block_diff.expand(batch, self.num_attention_heads, seq_len, seq_len)
def forward(
self,
hidden_states,
condition_vector,
speaker_embedding,
quantized_code,
time_step,
drop_audio_conditioning=False,
drop_code=False,
apply_cfg=True,
):
batch_size = hidden_states.shape[0]
if time_step.ndim == 0:
time_step = time_step.repeat(batch_size)
# Compute embeddings
time_embedding = self.time_embed(time_step)
text_embedding = self.text_embed(quantized_code, drop_code=False if apply_cfg else drop_code)
text_embedding_unconditioned = self.text_embed(quantized_code, drop_code=True) if apply_cfg else None
hidden_states = self.input_embed(
hidden_states,
speaker_embedding,
condition_vector,
text_embedding,
drop_audio_cond=drop_audio_conditioning,
code_embed_uncond=text_embedding_unconditioned,
apply_cfg=apply_cfg,
)
# Compute positional encodings
position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device)
position_ids = position_ids[None, :].repeat(batch_size, 1)
position_embeddings = self.rotary_embed(hidden_states, position_ids)
blockwise_difference = self._create_block_diff(hidden_states)
# Transformer blocks
for transformer_block in self.transformer_blocks:
hidden_states = transformer_block(
hidden_states,
time_embedding,
position_embeddings=position_embeddings,
block_diff=blockwise_difference,
)
hidden_states = self.norm_out(hidden_states, time_embedding)
output = self.proj_out(hidden_states)
return output
@torch.no_grad()
def sample(
self,
conditioning_vector,
reference_mel_spectrogram,
quantized_code,
num_steps=10,
guidance_scale=0.5,
sway_coefficient=-1.0,
):
noise_initialization = torch.randn([1, 30000, self.mel_dim], dtype=reference_mel_spectrogram.dtype)
maximum_duration = quantized_code.shape[1] * self.repeats
initial_state = noise_initialization[:, :maximum_duration].to(quantized_code.device)
batch_size = reference_mel_spectrogram.shape[0]
conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1)
if batch_size != 1:
raise ValueError("Only batch size = 1 is currently supported")
def ode_function(time_step, hidden_states):
if guidance_scale < 1e-5:
prediction = self(
hidden_states=hidden_states,
speaker_embedding=conditioning_vector,
condition_vector=reference_mel_spectrogram,
quantized_code=quantized_code,
time_step=time_step,
drop_audio_conditioning=False,
drop_code=False,
)
return prediction
model_output = self(
hidden_states=hidden_states,
quantized_code=quantized_code,
speaker_embedding=conditioning_vector,
condition_vector=reference_mel_spectrogram,
time_step=time_step,
apply_cfg=True,
)
guided_prediction, null_prediction = torch.chunk(model_output, 2, dim=0)
return guided_prediction + (guided_prediction - null_prediction) * guidance_scale
initial_time = 0
time_embedding = torch.linspace(
initial_time, 1, num_steps, device=quantized_code.device, dtype=conditioning_vector.dtype
)
if sway_coefficient is not None:
time_embedding += sway_coefficient * (torch.cos(torch.pi / 2 * time_embedding) - 1 + time_embedding)
ode_solver = RungeKutta4ODESolver(function=ode_function, initial_value=initial_state)
solution_trajectory = ode_solver.integrate(time_embedding)
generated_waveform = solution_trajectory[-1]
generated_mel_spectrogram = generated_waveform.permute(0, 2, 1)
return generated_mel_spectrogram
@auto_docstring(
custom_intro="""
The full Qwen2.5Omni Token2Wav model. Consists a DiT model take speech tokens as input and predict mel spectrogram and a BigVGAN vocoder take mel spectrogram as input and predict waveform.
"""
)
| Qwen2_5OmniToken2WavDiTModel |
python | pandas-dev__pandas | pandas/core/indexes/timedeltas.py | {
"start": 1378,
"end": 12362
} | class ____(DatetimeTimedeltaMixin):
"""
Immutable Index of timedelta64 data.
Represented internally as int64, and scalars returned Timedelta objects.
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
``'infer'`` can be passed in order to set the frequency of the index as
the inferred frequency upon creation.
dtype : numpy.dtype or str, default None
Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``,
``timedelta64[ms]``, and ``timedelta64[s]``.
copy : bool
Make a copy of input array.
name : object
Name to be stored in the index.
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
mean
See Also
--------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
timedelta_range : Create a fixed-frequency TimedeltaIndex.
Notes
-----
To learn more about the frequency strings, please see
:ref:`this link<timeseries.offset_aliases>`.
Examples
--------
>>> pd.TimedeltaIndex(["0 days", "1 days", "2 days", "3 days", "4 days"])
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
We can also let pandas infer the frequency when possible.
>>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq="infer")
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
"""
_typ = "timedeltaindex"
_data_cls = TimedeltaArray
@property
def _engine_type(self) -> type[libindex.TimedeltaEngine]:
return libindex.TimedeltaEngine
_data: TimedeltaArray
# Use base class method instead of DatetimeTimedeltaMixin._get_string_slice
_get_string_slice = Index._get_string_slice
# error: Signature of "_resolution_obj" incompatible with supertype
# "DatetimeIndexOpsMixin"
@property
def _resolution_obj(self) -> Resolution | None: # type: ignore[override]
return self._data._resolution_obj
# -------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
freq=lib.no_default,
dtype=None,
copy: bool = False,
name=None,
):
name = maybe_extract_name(name, data, cls)
if is_scalar(data):
cls._raise_scalar_data_error(data)
if dtype is not None:
dtype = pandas_dtype(dtype)
if (
isinstance(data, TimedeltaArray)
and freq is lib.no_default
and (dtype is None or dtype == data.dtype)
):
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
if (
isinstance(data, TimedeltaIndex)
and freq is lib.no_default
and name is None
and (dtype is None or dtype == data.dtype)
):
if copy:
return data.copy()
else:
return data._view()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence_not_strict(
data, freq=freq, unit=None, dtype=dtype, copy=copy
)
refs = None
if not copy and isinstance(data, (ABCSeries, Index)):
refs = data._references
return cls._simple_new(tdarr, name=name, refs=refs)
# -------------------------------------------------------------------
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if isinstance(dtype, ArrowDtype):
return dtype.kind == "m"
return lib.is_np_dtype(dtype, "m") # aka self._data._is_recognized_dtype
# -------------------------------------------------------------------
# Indexing Methods
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int, slice, or ndarray[int]
"""
self._check_indexing_error(key)
try:
key = self._data._validate_scalar(key, unbox=False)
except TypeError as err:
raise KeyError(key) from err
return Index.get_loc(self, key)
# error: Return type "tuple[Timedelta | NaTType, None]" of "_parse_with_reso"
# incompatible with return type "tuple[datetime, Resolution]" in supertype
# "DatetimeIndexOpsMixin"
def _parse_with_reso(self, label: str) -> tuple[Timedelta | NaTType, None]: # type: ignore[override]
# the "with_reso" is a no-op for TimedeltaIndex
parsed = Timedelta(label)
return parsed, None
def _parsed_string_to_bounds(self, reso, parsed: Timedelta):
# reso is unused, included to match signature of DTI/PI
lbound = parsed.round(parsed.resolution_string)
rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
return lbound, rbound
# -------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "timedelta64"
@set_module("pandas")
def timedelta_range(
start=None,
end=None,
periods: int | None = None,
freq=None,
name=None,
closed=None,
*,
unit: TimeUnit | None = None,
) -> TimedeltaIndex:
"""
Return a fixed frequency TimedeltaIndex with day as the default.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5h'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
unit : {'s', 'ms', 'us', 'ns', None}, default None
Specify the desired resolution of the result.
If not specified, this is inferred from the 'start', 'end', and 'freq'
using the same inference as :class:`Timedelta` taking the highest
resolution of the three that are provided.
.. versionadded:: 2.0.0
Returns
-------
TimedeltaIndex
Fixed frequency, with day as the default.
See Also
--------
date_range : Return a fixed frequency DatetimeIndex.
period_range : Return a fixed frequency PeriodIndex.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
a maximum of three can be specified at once. Of the three parameters
``start``, ``end``, and ``periods``, at least two must be specified.
If ``freq`` is omitted, the resulting ``DatetimeIndex`` will have
``periods`` linearly spaced elements between ``start`` and ``end``
(closed on both sides).
To learn more about the frequency strings, please see
:ref:`this link<timeseries.offset_aliases>`.
Examples
--------
>>> pd.timedelta_range(start="1 day", periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start="1 day", periods=4, closed="right")
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start="1 day", end="2 days", freq="6h")
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6h')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start="1 day", end="5 days", periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
**Specify a unit**
>>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s")
TimedeltaIndex(['1 days', '100001 days', '200001 days'],
dtype='timedelta64[s]', freq='100000D')
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
freq = to_offset(freq)
if com.count_not_none(start, end, periods, freq) != 3:
# This check needs to come before the `unit = start.unit` line below
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
if unit is None:
# Infer the unit based on the inputs
if start is not None and end is not None:
start = Timedelta(start)
end = Timedelta(end)
start = cast(Timedelta, start)
end = cast(Timedelta, end)
if abbrev_to_npy_unit(start.unit) > abbrev_to_npy_unit(end.unit):
unit = cast("TimeUnit", start.unit)
else:
unit = cast("TimeUnit", end.unit)
elif start is not None:
start = Timedelta(start)
start = cast(Timedelta, start)
unit = cast("TimeUnit", start.unit)
else:
end = Timedelta(end)
end = cast(Timedelta, end)
unit = cast("TimeUnit", end.unit)
# Last we need to watch out for cases where the 'freq' implies a higher
# unit than either start or end
if freq is not None:
freq = cast("Tick | Day", freq)
creso = abbrev_to_npy_unit(unit)
if freq._creso > creso: # pyright: ignore[reportAttributeAccessIssue]
unit = cast("TimeUnit", freq.base.freqstr)
tdarr = TimedeltaArray._generate_range(
start, end, periods, freq, closed=closed, unit=unit
)
return TimedeltaIndex._simple_new(tdarr, name=name)
| TimedeltaIndex |
python | pypa__warehouse | tests/unit/api/test_simple.py | {
"start": 1037,
"end": 2592
} | class ____:
@pytest.mark.parametrize("header", [None, "text/plain"])
def test_defaults_text_html(self, header):
"""
Ensures that, at least until we want to change the default, that we
default to text/html.
"""
request = DummyRequest(accept=header)
assert simple._select_content_type(request) == simple.MIME_TEXT_HTML
@pytest.mark.parametrize(
("header", "expected"),
[
(simple.MIME_TEXT_HTML, simple.MIME_TEXT_HTML),
(
simple.MIME_PYPI_SIMPLE_V1_HTML,
simple.MIME_PYPI_SIMPLE_V1_HTML,
),
(
simple.MIME_PYPI_SIMPLE_V1_JSON,
simple.MIME_PYPI_SIMPLE_V1_JSON,
),
(
f"{simple.MIME_TEXT_HTML}, {simple.MIME_PYPI_SIMPLE_V1_HTML}, "
f"{simple.MIME_PYPI_SIMPLE_V1_JSON}",
simple.MIME_TEXT_HTML,
),
(
f"{simple.MIME_TEXT_HTML};q=0.01, "
f"{simple.MIME_PYPI_SIMPLE_V1_HTML};q=0.2, "
f"{simple.MIME_PYPI_SIMPLE_V1_JSON}",
simple.MIME_PYPI_SIMPLE_V1_JSON,
),
],
)
def test_selects(self, header, expected):
request = DummyRequest(accept=header)
assert simple._select_content_type(request) == expected
CONTENT_TYPE_PARAMS = [
(simple.MIME_TEXT_HTML, None),
(simple.MIME_PYPI_SIMPLE_V1_HTML, None),
(simple.MIME_PYPI_SIMPLE_V1_JSON, "json"),
]
| TestContentNegotiation |
python | apache__airflow | airflow-core/src/airflow/models/callback.py | {
"start": 8606,
"end": 9507
} | class ____(Callback):
"""Callbacks that run on the executor."""
__mapper_args__ = {"polymorphic_identity": CallbackType.EXECUTOR}
def __init__(
self, callback_def: ImportPathExecutorCallbackDefProtocol, fetch_method: CallbackFetchMethod, **kwargs
):
"""
Initialize an ExecutorCallback from a callback definition and fetch method.
:param callback_def: Callback definition with path, kwargs, and executor
:param fetch_method: Method to fetch the callback at runtime
:param kwargs: Passed to parent Callback.__init__ (see base class for details)
"""
super().__init__(**kwargs)
self.fetch_method = fetch_method
self.data |= callback_def.serialize()
def __repr__(self):
return f"{self.data['path']}({self.data['kwargs'] or ''}) on {self.data.get('executor', 'default')} executor"
| ExecutorCallback |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_inlinehilite.py | {
"start": 17219,
"end": 17936
} | class ____(util.MdCase):
"""Test custom broken InlineHilite cases fails."""
extension = [
'pymdownx.highlight',
'pymdownx.inlinehilite',
]
extension_configs = {
'pymdownx.inlinehilite': {
'custom_inline': [
{
'name': 'test',
'class': 'test',
'format': _format_exploder_fail
}
]
}
}
def test_broken(self):
"""Test custom broken formatter."""
with self.assertRaises(InlineHiliteException):
self.check_markdown(
r'`#!test boom`',
r''
)
| TestInlineHiliteCustomBrokenFormatterFail |
python | google__jax | jax/_src/core.py | {
"start": 3321,
"end": 8218
} | class ____:
__slots__ = ['__weakref__', '_constvars', '_invars', '_outvars', '_eqns',
'_effects', '_debug_info', '_is_high']
_constvars: list[Var]
_invars: list[Var]
_outvars: list[Atom]
_eqns: list[JaxprEqn]
_effects: Effects
_debug_info: DebugInfo
_is_high: bool
@property
def constvars(self) -> list[Var]:
return self._constvars
@property
def invars(self) -> list[Var]:
return self._invars
@property
def outvars(self) -> list[Atom]:
return self._outvars
@property
def eqns(self) -> list[JaxprEqn]:
return self._eqns
@property
def effects(self) -> Effects:
return self._effects
@property
def debug_info(self) -> DebugInfo:
return self._debug_info
@property
def is_high(self) -> bool:
return self._is_high
@property
def in_avals(self):
return [v.aval for v in self.invars]
@property
def in_aval_qdds(self) -> list[AbstractValue | AvalQDD]:
return [v.aval if v.initial_qdd is None else AvalQDD(v.aval, v.initial_qdd)
for v in self.invars]
@property
def final_aval_qdds(self) -> list[AbstractValue | AvalQDD]:
return [v.aval if v.final_qdd is None else AvalQDD(v.aval, v.final_qdd)
for v in self.invars]
@property
def out_avals(self):
return [v.aval for v in self.outvars]
def __init__(self, constvars: Sequence[Var], invars: Sequence[Var],
outvars: Sequence[Atom], eqns: Sequence[JaxprEqn],
effects: Effects = no_effects,
# We want all calls to pass a DebugInfo object, but for backwards
# compatibility we have to allow calls when the debug_info
# is missing.
debug_info: DebugInfo = None, # type: ignore[annotation-type-mismatch,assignment]
is_high: bool = False,
):
"""
Args:
constvars: list of variables introduced for constants. Array constants are
replaced with such variables while scalar constants are kept inline.
invars: list of input variables. Together, `constvars` and `invars` are
the inputs to the Jaxpr.
outvars: list of output atoms.
eqns: list of equations.
effects: set of effects. The effects on a jaxpr are a superset of the
union of the effects for each equation.
debug_info: debugging information.
"""
self._constvars = list(constvars)
self._invars = list(invars)
self._outvars = list(outvars)
self._eqns = list(eqns)
self._effects = effects
# TODO(https://github.com/jax-ml/jax/issues/26480)
debug_info = debug_info or lu._missing_debug_info("core.Jaxpr")
self._debug_info = debug_info.resolve_result_paths()
config.enable_checks.value and self._debug_info.assert_arg_names(len(invars))
config.enable_checks.value and self._debug_info.assert_result_paths(len(outvars))
self._is_high = is_high
def __str__(self):
return str(self.pretty_print())
__repr__ = __str__
def pretty_print(self, *, source_info=False, print_shapes=True,
custom_pp_eqn_rules=True, name_stack=False,
print_effects: bool = False, **kwargs):
doc = pp_toplevel_jaxpr(
self, source_info=source_info, print_shapes=print_shapes,
custom_pp_eqn_rules=custom_pp_eqn_rules, name_stack=name_stack,
print_effects=print_effects)
return doc.format(**kwargs)
def _repr_pretty_(self, p, cycle):
return p.text(self.pretty_print(use_color=True))
def replace(self, **kwargs):
debug_default = self.debug_info
if (kwargs.get('invars', self.invars) != self.invars or
kwargs.get('outvars', self.outvars) != self.outvars):
debug_default = debug_default.with_unknown_names()
jaxpr = Jaxpr(
constvars=kwargs.pop("constvars", self.constvars),
invars=kwargs.pop("invars", self.invars),
outvars=kwargs.pop("outvars", self.outvars),
eqns=kwargs.pop("eqns", self.eqns),
effects=kwargs.pop("effects", self.effects),
debug_info=kwargs.pop("debug_info", debug_default),
is_high=kwargs.pop("is_high", self.is_high),
)
if kwargs:
raise ValueError(f"Unknown keyword arguments: {kwargs}")
return jaxpr
weakref_cache_key_types.add(Jaxpr)
def join_effects(*effects: Effects) -> Effects:
return set().union(*effects) if effects else no_effects
def jaxprs_in_params(params) -> Iterator[Jaxpr]:
for val in params.values():
vals = val if isinstance(val, tuple) else (val,)
for v in vals:
if isinstance(v, Jaxpr):
yield v
elif isinstance(v, ClosedJaxpr):
yield v.jaxpr
def subjaxprs(jaxpr: Jaxpr) -> Iterator[Jaxpr]:
"""Generator for all subjaxprs found in the params of jaxpr.eqns.
Does not descend recursively into the found subjaxprs.
"""
for eqn in jaxpr.eqns:
yield from jaxprs_in_params(eqn.params)
| Jaxpr |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/basic.py | {
"start": 4889,
"end": 5432
} | class ____(DefaultComponent):
type = "image"
def __init__(self, src=None, label=None, title=None, subtitle=None):
super().__init__(title=title, subtitle=subtitle)
self._src = src
self._label = label
def render(self):
datadict = super().render()
img_dict = dict(
src=self._src,
label=self._label,
)
datadict.update(img_dict)
if self.component_id is not None:
datadict["id"] = self.component_id
return datadict
| ImageComponent |
python | google__pytype | pytype/pytd/visitors_test.py | {
"start": 541,
"end": 34881
} | class ____(parser_test_base.ParserTest):
"""Tests the classes in parse/visitors."""
def test_lookup_classes(self):
src = textwrap.dedent("""
from typing import Union
class object:
pass
class A:
def a(self, a: A, b: B) -> Union[A, B]:
raise A()
raise B()
class B:
def b(self, a: A, b: B) -> Union[A, B]:
raise A()
raise B()
""")
tree = self.Parse(src)
new_tree = visitors.LookupClasses(tree)
self.AssertSourceEquals(new_tree, src)
new_tree.Visit(visitors.VerifyLookup())
def test_maybe_fill_in_local_pointers(self):
src = textwrap.dedent("""
from typing import Union
class A:
def a(self, a: A, b: B) -> Union[A, B]:
raise A()
raise B()
""")
tree = self.Parse(src)
ty_a = pytd.ClassType("A")
ty_a.Visit(visitors.FillInLocalPointers({"": tree}))
self.assertIsNotNone(ty_a.cls)
ty_b = pytd.ClassType("B")
ty_b.Visit(visitors.FillInLocalPointers({"": tree}))
self.assertIsNone(ty_b.cls)
def test_deface_unresolved(self):
builtins = self.Parse(textwrap.dedent("""
class int:
pass
"""))
src = textwrap.dedent("""
class A(X):
def a(self, a: A, b: X, c: int) -> X:
raise X()
def b(self) -> X[int]: ...
""")
expected = textwrap.dedent("""
from typing import Any
class A(Any):
def a(self, a: A, b: Any, c: int) -> Any:
raise Any
def b(self) -> Any: ...
""")
tree = self.Parse(src)
new_tree = tree.Visit(visitors.DefaceUnresolved([tree, builtins]))
new_tree.Visit(visitors.VerifyVisitor())
self.AssertSourceEquals(new_tree, expected)
def test_deface_unresolved2(self):
builtins = self.Parse(textwrap.dedent("""
from typing import Generic, TypeVar
class int:
pass
T = TypeVar("T")
class list(Generic[T]):
pass
"""))
src = textwrap.dedent("""
from typing import Union
class A(X):
def a(self, a: A, b: X, c: int) -> X:
raise X()
def c(self) -> Union[list[X], int]: ...
""")
expected = textwrap.dedent("""
from typing import Any, Union
class A(Any):
def a(self, a: A, b: Any, c: int) -> Any:
raise Any
def c(self) -> Union[list[Any], int]: ...
""")
tree = self.Parse(src)
new_tree = tree.Visit(visitors.DefaceUnresolved([tree, builtins]))
new_tree.Visit(visitors.VerifyVisitor())
self.AssertSourceEquals(new_tree, expected)
def test_replace_types_by_name(self):
src = textwrap.dedent("""
from typing import Union
class A:
def a(self, a: Union[A, B]) -> Union[A, B]:
raise A()
raise B()
""")
expected = textwrap.dedent("""
from typing import Union
class A:
def a(self: A2, a: Union[A2, B]) -> Union[A2, B]:
raise A2()
raise B()
""")
tree = self.Parse(src)
tree2 = tree.Visit(visitors.ReplaceTypesByName({"A": pytd.NamedType("A2")}))
self.AssertSourceEquals(tree2, expected)
def test_replace_types_by_matcher(self):
src = textwrap.dedent("""
from typing import Union
class A:
def a(self, a: Union[A, B]) -> Union[A, B]:
raise A()
raise B()
""")
expected = textwrap.dedent("""
from typing import Union
class A:
def a(self: A2, a: Union[A2, B]) -> Union[A2, B]:
raise A2()
raise B()
""")
tree = self.Parse(src)
tree2 = tree.Visit(
visitors.ReplaceTypesByMatcher(
lambda node: node.name == "A", pytd.NamedType("A2")
)
)
self.AssertSourceEquals(tree2, expected)
def test_superclasses_by_name(self):
src = textwrap.dedent("""
class A():
pass
class B():
pass
class C(A):
pass
class D(A,B):
pass
class E(C,D,A):
pass
""")
tree = self.Parse(src)
data = tree.Visit(visitors.ExtractSuperClassesByName())
self.assertCountEqual(("object",), data["A"])
self.assertCountEqual(("object",), data["B"])
self.assertCountEqual(("A",), data["C"])
self.assertCountEqual(("A", "B"), data["D"])
self.assertCountEqual(("A", "C", "D"), data["E"])
def test_remove_unknown_classes(self):
src = pytd_src("""
from typing import Union
class `~unknown1`():
pass
class `~unknown2`():
pass
class A:
def foobar(x: `~unknown1`, y: `~unknown2`) -> Union[`~unknown1`, int]: ...
""")
expected = textwrap.dedent("""
from typing import Any, Union
class A:
def foobar(x, y) -> Union[Any, int]: ...
""")
tree = self.Parse(src)
tree = tree.Visit(visitors.RemoveUnknownClasses())
self.AssertSourceEquals(tree, expected)
def test_in_place_lookup_external_classes(self):
src1 = textwrap.dedent("""
def f1() -> bar.Bar: ...
class Foo:
pass
""")
src2 = textwrap.dedent("""
def f2() -> foo.Foo: ...
class Bar:
pass
""")
ast1 = self.Parse(src1, name="foo")
ast2 = self.Parse(src2, name="bar")
ast1 = ast1.Visit(visitors.LookupExternalTypes(dict(foo=ast1, bar=ast2)))
ast2 = ast2.Visit(visitors.LookupExternalTypes(dict(foo=ast1, bar=ast2)))
(f1,) = ast1.Lookup("foo.f1").signatures
(f2,) = ast2.Lookup("bar.f2").signatures
self.assertIs(ast2.Lookup("bar.Bar"), f1.return_type.cls)
self.assertIs(ast1.Lookup("foo.Foo"), f2.return_type.cls)
def test_lookup_constant(self):
src1 = textwrap.dedent("""
Foo = ... # type: type
""")
src2 = textwrap.dedent("""
class Bar:
bar = ... # type: foo.Foo
""")
ast1 = self.Parse(src1, name="foo").Visit(
visitors.LookupBuiltins(self.loader.builtins)
)
ast2 = self.Parse(src2, name="bar")
ast2 = ast2.Visit(visitors.LookupExternalTypes({"foo": ast1, "bar": ast2}))
self.assertEqual(
ast2.Lookup("bar.Bar").constants[0],
pytd.Constant(name="bar", type=pytd.AnythingType()),
)
def test_lookup_star_alias(self):
src1 = textwrap.dedent("""
x = ... # type: int
T = TypeVar("T")
class A: ...
def f(x: T) -> T: ...
B = A
""")
src2 = "from foo import *"
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast2 = ast2.Visit(
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"
)
)
self.assertEqual("bar", ast2.name)
self.assertSetEqual(
{a.name for a in ast2.aliases},
{"bar.x", "bar.T", "bar.A", "bar.f", "bar.B"},
)
def test_lookup_star_alias_in_unnamed_module(self):
src1 = textwrap.dedent("""
class A: ...
""")
src2 = "from foo import *"
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = self.Parse(src2)
name = ast2.name
ast2 = ast2.Visit(
visitors.LookupExternalTypes({"foo": ast1}, self_name=None)
)
self.assertEqual(name, ast2.name)
self.assertEqual(pytd_utils.Print(ast2), "from foo import A")
def test_lookup_two_star_aliases(self):
src1 = "class A: ..."
src2 = "class B: ..."
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast3 = (
self.Parse(src3).Replace(name="baz").Visit(visitors.ResolveLocalNames())
)
ast3 = ast3.Visit(
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"
)
)
self.assertSetEqual({a.name for a in ast3.aliases}, {"baz.A", "baz.B"})
def test_lookup_two_star_aliases_with_same_class(self):
src1 = "class A: ..."
src2 = "class A: ..."
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast3 = (
self.Parse(src3).Replace(name="baz").Visit(visitors.ResolveLocalNames())
)
self.assertRaises(
KeyError,
ast3.Visit,
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"
),
)
def test_lookup_star_alias_with_duplicate_class(self):
src1 = "class A: ..."
src2 = textwrap.dedent("""
from foo import *
class A:
x = ... # type: int
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast2 = ast2.Visit(
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"
)
)
self.assertMultiLineEqual(
pytd_utils.Print(ast2),
textwrap.dedent("""
class bar.A:
x: int
""").strip(),
)
def test_lookup_two_star_aliases_with_default_pyi(self):
src1 = DEFAULT_PYI
src2 = DEFAULT_PYI
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast3 = (
self.Parse(src3).Replace(name="baz").Visit(visitors.ResolveLocalNames())
)
ast3 = ast3.Visit(
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"
)
)
self.assertMultiLineEqual(
pytd_utils.Print(ast3),
textwrap.dedent("""
from typing import Any
def baz.__getattr__(name) -> Any: ...
""").strip(),
)
def test_lookup_star_alias_with_duplicate_getattr(self):
src1 = DEFAULT_PYI
src2 = textwrap.dedent("""
from typing import Any
from foo import *
def __getattr__(name) -> Any: ...
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast2 = ast2.Visit(
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"
)
)
self.assertMultiLineEqual(
pytd_utils.Print(ast2),
textwrap.dedent("""
from typing import Any
def bar.__getattr__(name) -> Any: ...
""").strip(),
)
def test_lookup_two_star_aliases_with_different_getattrs(self):
src1 = "def __getattr__(name) -> int: ..."
src2 = "def __getattr__(name) -> str: ..."
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast3 = (
self.Parse(src3).Replace(name="baz").Visit(visitors.ResolveLocalNames())
)
self.assertRaises(
KeyError,
ast3.Visit,
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"
),
)
def test_lookup_star_alias_with_different_getattr(self):
src1 = "def __getattr__(name) -> int: ..."
src2 = textwrap.dedent("""
from foo import *
def __getattr__(name) -> str: ...
""")
ast1 = (
self.Parse(src1).Replace(name="foo").Visit(visitors.ResolveLocalNames())
)
ast2 = (
self.Parse(src2).Replace(name="bar").Visit(visitors.ResolveLocalNames())
)
ast2 = ast2.Visit(
visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"
)
)
self.assertMultiLineEqual(
pytd_utils.Print(ast2),
textwrap.dedent("""
def bar.__getattr__(name) -> str: ...
""").strip(),
)
def test_collect_dependencies(self):
src = textwrap.dedent("""
from typing import Union
l = ... # type: list[Union[int, baz.BigInt]]
def f1() -> bar.Bar: ...
def f2() -> foo.bar.Baz: ...
""")
deps = visitors.CollectDependencies()
self.Parse(src).Visit(deps)
self.assertCountEqual({"baz", "bar", "foo.bar"}, deps.dependencies)
def test_expand(self):
src = textwrap.dedent("""
from typing import Union
def foo(a: Union[int, float], z: Union[complex, str], u: bool) -> file: ...
def bar(a: int) -> Union[str, unicode]: ...
""")
new_src = textwrap.dedent("""
from typing import Union
def foo(a: int, z: complex, u: bool) -> file: ...
def foo(a: int, z: str, u: bool) -> file: ...
def foo(a: float, z: complex, u: bool) -> file: ...
def foo(a: float, z: str, u: bool) -> file: ...
def bar(a: int) -> Union[str, unicode]: ...
""")
self.AssertSourceEquals(
self.ApplyVisitorToString(src, visitors.ExpandSignatures()), new_src
)
def test_print_imports(self):
src = textwrap.dedent("""
from typing import Any, List, Tuple, Union
def f(x: Union[int, slice]) -> List[Any]: ...
def g(x: foo.C.C2) -> None: ...
""")
expected = textwrap.dedent("""
import foo
from typing import Any, Union
def f(x: Union[int, slice]) -> list[Any]: ...
def g(x: foo.C.C2) -> None: ...
""").strip()
tree = self.Parse(src)
res = pytd_utils.Print(tree)
self.AssertSourceEquals(res, expected)
self.assertMultiLineEqual(res, expected)
def test_print_imports_named_type(self):
# Can't get tree by parsing so build explicitly
node = pytd.Constant("x", pytd.NamedType("typing.List"))
tree = pytd_utils.CreateModule(name=None, constants=(node,))
expected_src = textwrap.dedent("""
from typing import List
x: List
""").strip()
res = pytd_utils.Print(tree)
self.assertMultiLineEqual(res, expected_src)
def test_print_imports_ignores_existing(self):
src = "from foo import b"
tree = self.Parse(src)
res = pytd_utils.Print(tree)
self.assertMultiLineEqual(res, src)
@unittest.skip("depended on `or`")
def test_print_union_name_conflict(self):
src = textwrap.dedent("""
class Union: ...
def g(x: Union) -> Union[int, float]: ...
""")
tree = self.Parse(src)
res = pytd_utils.Print(tree)
self.AssertSourceEquals(res, src)
def test_adjust_type_parameters(self):
ast = self.Parse("""
from typing import Union
T = TypeVar("T")
T2 = TypeVar("T2")
def f(x: T) -> T: ...
class A(Generic[T]):
def a(self, x: T2) -> None:
self = A[Union[T, T2]]
""")
f = ast.Lookup("f")
(sig,) = f.signatures
(p_x,) = sig.params
self.assertEqual(
sig.template, (pytd.TemplateItem(pytd.TypeParameter("T", scope="f")),)
)
self.assertEqual(p_x.type, pytd.TypeParameter("T", scope="f"))
cls = ast.Lookup("A")
(f_cls,) = cls.methods
(sig_cls,) = f_cls.signatures
p_self, p_x_cls = sig_cls.params
self.assertEqual(
cls.template, (pytd.TemplateItem(pytd.TypeParameter("T", scope="A")),)
)
self.assertEqual(
sig_cls.template,
(pytd.TemplateItem(pytd.TypeParameter("T2", scope="A.a")),),
)
self.assertEqual(
p_self.type.parameters, (pytd.TypeParameter("T", scope="A"),)
)
self.assertEqual(p_x_cls.type, pytd.TypeParameter("T2", scope="A.a"))
def test_adjust_type_parameters_with_builtins(self):
ast = self.ParseWithBuiltins("""
T = TypeVar("T")
K = TypeVar("K")
V = TypeVar("V")
class Foo(List[int]): pass
class Bar(Dict[T, int]): pass
class Baz(Generic[K, V]): pass
class Qux(Baz[str, int]): pass
""")
foo = ast.Lookup("Foo")
bar = ast.Lookup("Bar")
qux = ast.Lookup("Qux")
(foo_base,) = foo.bases
(bar_base,) = bar.bases
(qux_base,) = qux.bases
# Expected:
# Class(Foo, base=GenericType(List, parameters=(int,)), template=())
# Class(Bar, base=GenericType(Dict, parameters=(T, int)), template=(T))
# Class(Qux, base=GenericType(Baz, parameters=(str, int)), template=())
self.assertEqual((pytd.ClassType("int"),), foo_base.parameters)
self.assertEqual((), foo.template)
self.assertEqual(
(pytd.TypeParameter("T", scope="Bar"), pytd.ClassType("int")),
bar_base.parameters,
)
self.assertEqual(
(pytd.TemplateItem(pytd.TypeParameter("T", scope="Bar")),), bar.template
)
self.assertEqual(
(pytd.ClassType("str"), pytd.ClassType("int")), qux_base.parameters
)
self.assertEqual((), qux.template)
def test_adjust_type_parameters_with_duplicates(self):
ast = self.ParseWithBuiltins("""
T = TypeVar("T")
class A(Dict[T, T], Generic[T]): pass
""")
a = ast.Lookup("A")
self.assertEqual(
(pytd.TemplateItem(pytd.TypeParameter("T", scope="A")),), a.template
)
def test_adjust_type_parameters_with_duplicates_in_generic(self):
src = textwrap.dedent("""
T = TypeVar("T")
class A(Generic[T, T]): pass
""")
self.assertRaises(visitors.ContainerError, lambda: self.Parse(src))
def test_verify_containers(self):
ast1 = self.ParseWithBuiltins("""
from typing import SupportsInt, TypeVar
T = TypeVar("T")
class Foo(SupportsInt[T]): pass
""")
ast2 = self.ParseWithBuiltins("""
from typing import SupportsInt
class Foo(SupportsInt[int]): pass
""")
ast3 = self.ParseWithBuiltins("""
from typing import Generic
class Foo(Generic[int]): pass
""")
ast4 = self.ParseWithBuiltins("""
from typing import List
class Foo(List[int, str]): pass
""")
self.assertRaises(
visitors.ContainerError, lambda: ast1.Visit(visitors.VerifyContainers())
)
self.assertRaises(
visitors.ContainerError, lambda: ast2.Visit(visitors.VerifyContainers())
)
self.assertRaises(
visitors.ContainerError, lambda: ast3.Visit(visitors.VerifyContainers())
)
self.assertRaises(
visitors.ContainerError, lambda: ast4.Visit(visitors.VerifyContainers())
)
def test_clear_class_pointers(self):
cls = pytd.Class("foo", (), (), (), (), (), (), None, ())
t = pytd.ClassType("foo", cls)
t = t.Visit(visitors.ClearClassPointers())
self.assertIsNone(t.cls)
def test_add_name_prefix(self):
src = textwrap.dedent("""
from typing import TypeVar
def f(a: T) -> T: ...
T = TypeVar("T")
class X(Generic[T]):
pass
""")
tree = self.Parse(src)
self.assertIsNone(tree.Lookup("T").scope)
self.assertEqual("X", tree.Lookup("X").template[0].type_param.scope)
tree = tree.Replace(name="foo").Visit(visitors.ResolveLocalNames())
self.assertIsNotNone(tree.Lookup("foo.f"))
self.assertIsNotNone(tree.Lookup("foo.X"))
self.assertEqual("foo", tree.Lookup("foo.T").scope)
self.assertEqual("foo.X", tree.Lookup("foo.X").template[0].type_param.scope)
def test_add_name_prefix_twice(self):
src = textwrap.dedent("""
from typing import Any, TypeVar
x = ... # type: Any
T = TypeVar("T")
class X(Generic[T]): ...
""")
tree = self.Parse(src)
tree = tree.Replace(name="foo").Visit(visitors.ResolveLocalNames())
tree = tree.Replace(name="foo").Visit(visitors.ResolveLocalNames())
self.assertIsNotNone(tree.Lookup("foo.foo.x"))
self.assertEqual("foo.foo", tree.Lookup("foo.foo.T").scope)
self.assertEqual(
"foo.foo.X", tree.Lookup("foo.foo.X").template[0].type_param.scope
)
def test_add_name_prefix_on_class_type(self):
src = textwrap.dedent("""
x = ... # type: y
class Y: ...
""")
tree = self.Parse(src)
x = tree.Lookup("x")
x = x.Replace(type=pytd.ClassType("Y"))
tree = tree.Replace(constants=(x,), name="foo")
tree = tree.Visit(visitors.ResolveLocalNames())
self.assertEqual("foo.Y", tree.Lookup("foo.x").type.name)
def test_add_name_prefix_on_nested_class_alias(self):
src = textwrap.dedent("""
class A:
class B:
class C: ...
D = A.B.C
""")
expected = textwrap.dedent("""
class foo.A:
class foo.A.B:
class foo.A.B.C: ...
D: type[foo.A.B.C]
""").strip()
self.assertMultiLineEqual(
expected,
pytd_utils.Print(
self.Parse(src)
.Replace(name="foo")
.Visit(visitors.ResolveLocalNames())
),
)
def test_add_name_prefix_on_nested_class_outside_ref(self):
src = textwrap.dedent("""
class A:
class B: ...
b: A.B
C = A.B
def f(x: A.B) -> A.B: ...
class D:
b: A.B
def f(self, x: A.B) -> A.B: ...
""")
ast = self.Parse(src)
ast = ast.Replace(name="foo").Visit(visitors.ResolveLocalNames())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
foo.b: foo.A.B
foo.C: type[foo.A.B]
class foo.A:
class foo.A.B: ...
class foo.D:
b: foo.A.B
def f(self, x: foo.A.B) -> foo.A.B: ...
def foo.f(x: foo.A.B) -> foo.A.B: ...
""").strip(),
)
# Check that even after `RemoveNamePrefix`, the type annotation for `self`
# is skipped from being printed.
ast = ast.Visit(visitors.RemoveNamePrefix())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
b: A.B
C: type[A.B]
class A:
class B: ...
class D:
b: A.B
def f(self, x: A.B) -> A.B: ...
def f(x: A.B) -> A.B: ...
""").strip(),
)
def test_add_name_prefix_on_nested_method(self):
src = textwrap.dedent("""
class A:
class B:
def copy(self) -> A.B: ...
""")
ast = self.Parse(src)
ast = ast.Replace(name="foo").Visit(visitors.ResolveLocalNames())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
class foo.A:
class foo.A.B:
def copy(self) -> foo.A.B: ...
""").strip(),
)
# Check that even after `RemoveNamePrefix`, the type annotation for `self`
# is skipped from being printed.
ast = ast.Visit(visitors.RemoveNamePrefix())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
class A:
class B:
def copy(self) -> A.B: ...
""").strip(),
)
def test_add_name_prefix_on_classmethod(self):
src = textwrap.dedent("""
class A:
@classmethod
def foo(cls, a: int) -> int: ...
@classmethod
def bar(cls) -> A: ...
""")
ast = self.Parse(src)
ast = ast.Replace(name="foo").Visit(visitors.ResolveLocalNames())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
class foo.A:
@classmethod
def foo(cls, a: int) -> int: ...
@classmethod
def bar(cls) -> foo.A: ...
""").strip(),
)
# Check that even after `RemoveNamePrefix`, the type annotation for `cls`
# is skipped from being printed.
ast = ast.Visit(visitors.RemoveNamePrefix())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
class A:
@classmethod
def foo(cls, a: int) -> int: ...
@classmethod
def bar(cls) -> A: ...
""").strip(),
)
def test_print_merge_types(self):
src = textwrap.dedent("""
from typing import Union
def a(a: float) -> int: ...
def b(a: Union[int, float]) -> int: ...
def c(a: object) -> Union[float, int]: ...
def d(a: float) -> int: ...
def e(a: Union[bool, None]) -> Union[bool, None]: ...
""")
expected = textwrap.dedent("""
from typing import Optional, Union
def a(a: float) -> int: ...
def b(a: float) -> int: ...
def c(a: object) -> Union[float, int]: ...
def d(a: float) -> int: ...
def e(a: Optional[bool]) -> Optional[bool]: ...
""")
self.assertMultiLineEqual(
expected.strip(), pytd_utils.Print(self.ToAST(src)).strip()
)
def test_print_heterogeneous_tuple(self):
t = pytd.TupleType(
pytd.NamedType("tuple"),
(pytd.NamedType("str"), pytd.NamedType("float")),
)
self.assertEqual("tuple[str, float]", pytd_utils.Print(t))
def test_verify_heterogeneous_tuple(self):
# Error: does not inherit from Generic
base = pytd.ClassType("tuple")
base.cls = pytd.Class("tuple", (), (), (), (), (), (), None, ())
t1 = pytd.TupleType(base, (pytd.NamedType("str"), pytd.NamedType("float")))
self.assertRaises(
visitors.ContainerError, lambda: t1.Visit(visitors.VerifyContainers())
)
# Error: Generic[str, float]
gen = pytd.ClassType("typing.Generic")
gen.cls = pytd.Class("typing.Generic", (), (), (), (), (), (), None, ())
t2 = pytd.TupleType(gen, (pytd.NamedType("str"), pytd.NamedType("float")))
self.assertRaises(
visitors.ContainerError, lambda: t2.Visit(visitors.VerifyContainers())
)
# Okay
param = pytd.TypeParameter("T")
generic_base = pytd.GenericType(gen, (param,))
base.cls = pytd.Class(
"tuple",
(),
(generic_base,),
(),
(),
(),
(),
None,
(pytd.TemplateItem(param),),
)
t3 = pytd.TupleType(base, (pytd.NamedType("str"), pytd.NamedType("float")))
t3.Visit(visitors.VerifyContainers())
def test_typevar_value_conflict(self):
# Conflicting values for _T.
ast = self.ParseWithBuiltins("""
from typing import List
class A(List[int], List[str]): ...
""")
self.assertRaises(
visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())
)
def test_typevar_value_conflict_hidden(self):
# Conflicting value for _T hidden in MRO.
ast = self.ParseWithBuiltins("""
from typing import List
class A(List[int]): ...
class B(A, List[str]): ...
""")
self.assertRaises(
visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())
)
def test_typevar_value_conflict_related_containers(self):
# List inherits from Sequence, so they share a type parameter.
ast = self.ParseWithBuiltins("""
from typing import List, Sequence
class A(List[int], Sequence[str]): ...
""")
self.assertRaises(
visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())
)
def test_typevar_value_no_conflict(self):
# Not an error if the containers are unrelated, even if they use the same
# type parameter name.
ast = self.ParseWithBuiltins("""
from typing import ContextManager, SupportsAbs
class Foo(SupportsAbs[float], ContextManager[Foo]): ...
""")
ast.Visit(visitors.VerifyContainers())
def test_typevar_value_consistency(self):
# Type renaming makes all type parameters represent the same type `T1`.
ast = self.ParseWithBuiltins("""
from typing import Generic, TypeVar
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
class A(Generic[T1]): ...
class B1(A[T2]): ...
class B2(A[T3]): ...
class C(B1[T4], B2[T5]): ...
class D(C[str, str], A[str]): ...
""")
ast.Visit(visitors.VerifyContainers())
def test_typevar_value_and_alias_conflict(self):
ast = self.ParseWithBuiltins("""
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T]): ...
class B(A[int], A[T]): ...
""")
self.assertRaises(
visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())
)
def test_typevar_alias_and_value_conflict(self):
ast = self.ParseWithBuiltins("""
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T]): ...
class B(A[T], A[int]): ...
""")
self.assertRaises(
visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())
)
def test_verify_container_with_mro_error(self):
# Make sure we don't crash.
ast = self.ParseWithBuiltins("""
from typing import List
class A(List[str]): ...
class B(List[str], A): ...
""")
ast.Visit(visitors.VerifyContainers())
def test_alias_printing(self):
a = pytd.Alias(
"MyList",
pytd.GenericType(pytd.NamedType("typing.List"), (pytd.AnythingType(),)),
)
ty = pytd_utils.CreateModule("test", aliases=(a,))
expected = textwrap.dedent("""
from typing import Any, List
MyList = List[Any]""")
self.assertMultiLineEqual(expected.strip(), pytd_utils.Print(ty).strip())
def test_print_none_union(self):
src = textwrap.dedent("""
from typing import Union
def f(x: Union[str, None]) -> None: ...
def g(x: Union[str, int, None]) -> None: ...
def h(x: Union[None]) -> None: ...
""")
expected = textwrap.dedent("""
from typing import Optional, Union
def f(x: Optional[str]) -> None: ...
def g(x: Optional[Union[str, int]]) -> None: ...
def h(x: None) -> None: ...
""")
self.assertMultiLineEqual(
expected.strip(), pytd_utils.Print(self.ToAST(src)).strip()
)
def test_lookup_typing_class(self):
node = visitors.LookupClasses(
pytd.NamedType("typing.Sequence"), self.loader.concat_all()
)
assert node.cls
def test_create_type_parameters_from_unknowns(self):
src = pytd_src("""
from typing import Dict
def f(x: `~unknown1`) -> `~unknown1`: ...
def g(x: `~unknown2`, y: `~unknown2`) -> None: ...
def h(x: `~unknown3`) -> None: ...
def i(x: Dict[`~unknown4`, `~unknown4`]) -> None: ...
# Should not be changed
class `~unknown5`:
def __add__(self, x: `~unknown6`) -> `~unknown6`: ...
def `~f`(x: `~unknown7`) -> `~unknown7`: ...
""")
expected = pytd_src("""
from typing import Dict
_T0 = TypeVar('_T0')
def f(x: _T0) -> _T0: ...
def g(x: _T0, y: _T0) -> None: ...
def h(x: `~unknown3`) -> None: ...
def i(x: Dict[_T0, _T0]) -> None: ...
class `~unknown5`:
def __add__(self, x: `~unknown6`) -> `~unknown6`: ...
def `~f`(x: `~unknown7`) -> `~unknown7`: ...
""")
ast1 = self.Parse(src)
ast1 = ast1.Visit(visitors.CreateTypeParametersForSignatures())
self.AssertSourceEquals(ast1, expected)
@unittest.skip("We no longer support redefining TypeVar")
def test_redefine_typevar(self):
src = pytd_src("""
def f(x: `~unknown1`) -> `~unknown1`: ...
class `TypeVar`: ...
""")
ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
import typing
_T0 = TypeVar('_T0')
class `TypeVar`: ...
def f(x: _T0) -> _T0: ...""").strip(),
)
def test_create_type_parameters_for_new(self):
src = textwrap.dedent("""
class Foo:
def __new__(cls: Type[Foo]) -> Foo: ...
class Bar:
def __new__(cls: Type[Bar], x, y, z) -> Bar: ...
""")
ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures())
self.assertMultiLineEqual(
pytd_utils.Print(ast),
textwrap.dedent("""
from typing import TypeVar
_TBar = TypeVar('_TBar', bound=Bar)
_TFoo = TypeVar('_TFoo', bound=Foo)
class Foo:
def __new__(cls: Type[_TFoo]) -> _TFoo: ...
class Bar:
def __new__(cls: Type[_TBar], x, y, z) -> _TBar: ...
""").strip(),
)
def test_keep_custom_new(self):
src = textwrap.dedent("""
class Foo:
def __new__(cls: Type[X]) -> X: ...
class Bar:
def __new__(cls, x: Type[Bar]) -> Bar: ...
""").strip()
ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures())
self.assertMultiLineEqual(pytd_utils.Print(ast), src)
def test_print_type_parameter_bound(self):
src = textwrap.dedent("""
from typing import TypeVar
T = TypeVar("T", bound=str)
""")
self.assertMultiLineEqual(
pytd_utils.Print(self.Parse(src)),
textwrap.dedent("""
from typing import TypeVar
T = TypeVar('T', bound=str)""").lstrip(),
)
def test_print_cls(self):
src = textwrap.dedent("""
class A:
def __new__(cls: Type[A]) -> A: ...
""")
self.assertMultiLineEqual(
pytd_utils.Print(self.Parse(src)),
textwrap.dedent("""
class A:
def __new__(cls) -> A: ...
""").strip(),
)
def test_print_never(self):
src = textwrap.dedent("""
def f() -> nothing: ...
""")
self.assertMultiLineEqual(
pytd_utils.Print(self.Parse(src)),
textwrap.dedent("""
from typing import Never
def f() -> Never: ...""").lstrip(),
)
def test_print_multiline_signature(self):
src = textwrap.dedent("""
def f(x: int, y: str, z: bool) -> list[str]:
pass
""")
self.assertMultiLineEqual(
pytd_utils.Print(self.Parse(src), multiline_args=True),
textwrap.dedent("""
def f(
x: int,
y: str,
z: bool
) -> list[str]: ...
""").strip(),
)
| TestVisitors |
python | kamyu104__LeetCode-Solutions | Python/shopping-offers.py | {
"start": 35,
"end": 892
} | class ____(object):
def shoppingOffers(self, price, special, needs):
"""
:type price: List[int]
:type special: List[List[int]]
:type needs: List[int]
:rtype: int
"""
def shoppingOffersHelper(price, special, needs, i):
if i == len(special):
return sum(map(lambda x, y: x*y, price, needs))
result = shoppingOffersHelper(price, special, needs, i+1)
for j in xrange(len(needs)):
needs[j] -= special[i][j]
if all(need >= 0 for need in needs):
result = min(result, special[i][-1] + shoppingOffersHelper(price, special, needs, i))
for j in xrange(len(needs)):
needs[j] += special[i][j]
return result
return shoppingOffersHelper(price, special, needs, 0)
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0020_add-api-project-proxy.py | {
"start": 120,
"end": 492
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0019_add-features"),
]
operations = [
migrations.CreateModel(
name="APIProject",
fields=[],
options={
"proxy": True,
},
bases=("projects.project",),
),
]
| Migration |
python | huggingface__transformers | tests/trainer/test_trainer_distributed_worker_seed.py | {
"start": 1455,
"end": 2640
} | class ____(TestCasePlus):
@run_first
@require_torch_multi_accelerator
def test_trainer(self):
device_count = backend_device_count(torch_device)
output_dir = self.get_auto_remove_tmp_dir()
distributed_args = f"""--nproc_per_node={device_count}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed_worker_seed.py
""".split()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
def run_distributed_training(training_args):
set_seed(42)
model = DummyModel()
dataset = DummyDataset()
training_args.max_steps = 10
# dataloader_num_workers must be > 0 to enable worker_init_fn
training_args.dataloader_num_workers = 2
trainer = Trainer(
model,
training_args,
train_dataset=dataset,
)
trainer.train()
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
run_distributed_training(training_args)
| TestTrainerDistributedWorkerSeed |
python | getsentry__sentry | tests/sentry/uptime/subscriptions/test_tasks.py | {
"start": 10602,
"end": 11949
} | class ____(BaseUptimeSubscriptionTaskTest):
expected_status = UptimeSubscription.Status.DELETING
task = delete_remote_uptime_subscription
def test(self) -> None:
subscription_id = uuid4().hex
sub = self.create_subscription(
UptimeSubscription.Status.DELETING, subscription_id=subscription_id
)
delete_remote_uptime_subscription(sub.id)
assert not UptimeSubscription.objects.filter(id=sub.id).exists()
self.assert_redis_config("default", sub, "delete", None)
def test_no_subscription_id(self) -> None:
sub = self.create_subscription(UptimeSubscription.Status.DELETING)
assert sub.subscription_id is None
delete_remote_uptime_subscription(sub.id)
assert not UptimeSubscription.objects.filter(id=sub.id).exists()
def test_delete_with_regions(self) -> None:
sub = self.create_uptime_subscription(
status=UptimeSubscription.Status.DELETING,
subscription_id=uuid4().hex,
region_slugs=["default"],
)
delete_remote_uptime_subscription(sub.id)
assert sub.subscription_id is not None
self.assert_redis_config("default", sub, "delete", None)
with pytest.raises(UptimeSubscription.DoesNotExist):
sub.refresh_from_db()
| DeleteUptimeSubscriptionTaskTest |
python | pytorch__pytorch | torch/distributions/dirichlet.py | {
"start": 1100,
"end": 4561
} | class ____(ExponentialFamily):
r"""
Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Dirichlet(torch.tensor([0.5, 0.5]))
>>> m.sample() # Dirichlet distributed with concentration [0.5, 0.5]
tensor([ 0.1046, 0.8954])
Args:
concentration (Tensor): concentration parameter of the distribution
(often referred to as alpha)
"""
# pyrefly: ignore [bad-override]
arg_constraints = {
"concentration": constraints.independent(constraints.positive, 1)
}
support = constraints.simplex
has_rsample = True
def __init__(
self,
concentration: Tensor,
validate_args: Optional[bool] = None,
) -> None:
if concentration.dim() < 1:
raise ValueError(
"`concentration` parameter must be at least one-dimensional."
)
self.concentration = concentration
batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
super().__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Dirichlet, _instance)
batch_shape = torch.Size(batch_shape)
new.concentration = self.concentration.expand(batch_shape + self.event_shape)
super(Dirichlet, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape: _size = ()) -> Tensor:
shape = self._extended_shape(sample_shape)
concentration = self.concentration.expand(shape)
return _Dirichlet.apply(concentration)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return (
torch.xlogy(self.concentration - 1.0, value).sum(-1)
+ torch.lgamma(self.concentration.sum(-1))
- torch.lgamma(self.concentration).sum(-1)
)
@property
def mean(self) -> Tensor:
return self.concentration / self.concentration.sum(-1, True)
@property
def mode(self) -> Tensor:
concentrationm1 = (self.concentration - 1).clamp(min=0.0)
mode = concentrationm1 / concentrationm1.sum(-1, True)
mask = (self.concentration < 1).all(dim=-1)
mode[mask] = torch.nn.functional.one_hot(
mode[mask].argmax(dim=-1), concentrationm1.shape[-1]
).to(mode)
return mode
@property
def variance(self) -> Tensor:
con0 = self.concentration.sum(-1, True)
return (
self.concentration
* (con0 - self.concentration)
/ (con0.pow(2) * (con0 + 1))
)
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (
torch.lgamma(self.concentration).sum(-1)
- torch.lgamma(a0)
- (k - a0) * torch.digamma(a0)
- ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1)
)
@property
def _natural_params(self) -> tuple[Tensor]:
return (self.concentration,)
# pyrefly: ignore [bad-override]
def _log_normalizer(self, x):
return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
| Dirichlet |
python | django__django | django/contrib/admin/options.py | {
"start": 90537,
"end": 99579
} | class ____(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super().__init__()
if self.verbose_name_plural is None:
if self.verbose_name is None:
self.verbose_name_plural = self.opts.verbose_name_plural
else:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
if self.verbose_name is None:
self.verbose_name = self.opts.verbose_name
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = ["vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "inlines.js"]
if self.filter_vertical or self.filter_horizontal:
js.extend(["SelectBox.js", "SelectFilter2.js"])
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Return a BaseInlineFormSet class for use in add/change views."""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
**kwargs,
}
base_model_form = defaults["form"]
can_change = self.has_change_permission(request, obj) if request else True
can_add = self.has_add_permission(request, obj) if request else True
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance._state.adding:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance
# representation, suitable to be an item in a
# list.
_("%(class_name)s %(instance)s")
% {"class_name": p._meta.verbose_name, "instance": p}
)
params = {
"class_name": self._meta.model._meta.verbose_name,
"instance": self.instance,
"related_objects": get_text_list(objs, _("and")),
}
msg = _(
"Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s"
)
raise ValidationError(
msg, code="deleting_protected", params=params
)
def is_valid(self):
result = super().is_valid()
self.hand_clean_DELETE()
return result
def has_changed(self):
# Protect against unauthorized edits.
if not can_change and not self.instance._state.adding:
return False
if not can_add and self.instance._state.adding:
return False
return super().has_changed()
defaults["form"] = DeleteProtectedModelForm
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def _get_form_for_get_fields(self, request, obj=None):
return self.get_formset(request, obj, fields=None).form
def get_queryset(self, request):
queryset = super().get_queryset(request)
if not self.has_view_or_change_permission(request):
queryset = queryset.none()
return queryset
def _has_any_perms_for_target_model(self, request, perms):
"""
This method is called only when the ModelAdmin's model is for an
ManyToManyField's implicit through model (if self.opts.auto_created).
Return True if the user has any of the given permissions ('add',
'change', etc.) for the model that points to the through model.
"""
opts = self.opts
# Find the target model of an auto-created many-to-many relationship.
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
return any(
request.user.has_perm(
"%s.%s" % (opts.app_label, get_permission_codename(perm, opts))
)
for perm in perms
)
def has_add_permission(self, request, obj):
if self.opts.auto_created:
# Auto-created intermediate models don't have their own
# permissions. The user needs to have the change permission for the
# related model in order to be able to do anything with the
# intermediate model.
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_change_permission(request)
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_delete_permission(request, obj)
def has_view_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission(). The 'change' permission
# also implies the 'view' permission.
return self._has_any_perms_for_target_model(request, ["view", "change"])
return super().has_view_permission(request)
| InlineModelAdmin |
python | PyCQA__pylint | doc/data/messages/d/duplicate-code/good/orange.py | {
"start": 26,
"end": 214
} | class ____(Fruit):
def eaten_by_animal(self, animal):
if animal == "cat":
raise ValueError("A cat would never do that !")
super().eaten_by_animal(animal)
| Orange |
python | getsentry__sentry | src/sentry/api/serializers/models/team.py | {
"start": 11086,
"end": 11796
} | class ____(BaseTeamSerializer):
def serialize(
self,
obj: Team,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> TeamSerializerResponse:
result = super().serialize(obj, attrs, user, **kwargs)
opt: _TeamSerializerResponseOptional = {}
# Expandable attributes.
if self._expand("externalTeams"):
opt["externalTeams"] = attrs["externalTeams"]
if self._expand("organization"):
opt["organization"] = serialize(obj.organization, user)
if self._expand("projects"):
opt["projects"] = attrs["projects"]
return {**result, **opt}
| TeamSerializer |
python | python__mypy | mypy/main.py | {
"start": 8252,
"end": 9647
} | class ____(argparse.RawDescriptionHelpFormatter):
def __init__(self, prog: str, **kwargs: Any) -> None:
super().__init__(prog=prog, max_help_position=28, **kwargs)
def _fill_text(self, text: str, width: int, indent: str) -> str:
if "\n" in text:
# Assume we want to manually format the text
return super()._fill_text(text, width, indent)
# Format the text like argparse, but overflow rather than
# breaking long words (like URLs)
text = self._whitespace_matcher.sub(" ", text).strip()
import textwrap
return textwrap.fill(
text,
width,
initial_indent=indent,
subsequent_indent=indent,
break_on_hyphens=False,
break_long_words=False,
)
# Define pairs of flag prefixes with inverse meaning.
flag_prefix_pairs: Final = [("allow", "disallow"), ("show", "hide")]
flag_prefix_map: Final[dict[str, str]] = {}
for a, b in flag_prefix_pairs:
flag_prefix_map[a] = b
flag_prefix_map[b] = a
def invert_flag_name(flag: str) -> str:
split = flag[2:].split("-", 1)
if len(split) == 2:
prefix, rest = split
if prefix in flag_prefix_map:
return f"--{flag_prefix_map[prefix]}-{rest}"
elif prefix == "no":
return f"--{rest}"
return f"--no-{flag[2:]}"
| AugmentedHelpFormatter |
python | ray-project__ray | python/ray/autoscaler/v2/tests/util.py | {
"start": 3893,
"end": 4056
} | class ____(abc.ABC):
@abstractmethod
def check(self, status: ClusterStatus):
pass
def __repr__(self) -> str:
return self.__str__()
| Check |
python | getlogbook__logbook | src/logbook/base.py | {
"start": 9692,
"end": 10402
} | class ____(ContextObject):
"""Can be pushed to a stack to inject additional information into
a log record as necessary::
def inject_ip(record):
record.extra["ip"] = "127.0.0.1"
with Processor(inject_ip):
...
"""
stack_manager = ContextStackManager()
def __init__(self, callback=None):
#: the callback that was passed to the constructor
self.callback = callback
def process(self, record):
"""Called with the log record that should be overridden. The default
implementation calls :attr:`callback` if it is not `None`.
"""
if self.callback is not None:
self.callback(record)
| Processor |
python | django__django | tests/auth_tests/test_hashers.py | {
"start": 30585,
"end": 33598
} | class ____(SimpleTestCase):
def test_scrypt(self):
encoded = make_password("lètmein", "seasalt", "scrypt")
self.assertEqual(
encoded,
"scrypt$16384$seasalt$8$5$ECMIUp+LMxMSK8xB/IVyba+KYGTI7FTnet025q/1f"
"/vBAVnnP3hdYqJuRi+mJn6ji6ze3Fbb7JEFPKGpuEf5vw==",
)
self.assertIs(is_password_usable(encoded), True)
self.assertIs(check_password("lètmein", encoded), True)
self.assertIs(check_password("lètmeinz", encoded), False)
self.assertEqual(identify_hasher(encoded).algorithm, "scrypt")
# Blank passwords.
blank_encoded = make_password("", "seasalt", "scrypt")
self.assertIs(blank_encoded.startswith("scrypt$"), True)
self.assertIs(is_password_usable(blank_encoded), True)
self.assertIs(check_password("", blank_encoded), True)
self.assertIs(check_password(" ", blank_encoded), False)
def test_scrypt_decode(self):
encoded = make_password("lètmein", "seasalt", "scrypt")
hasher = get_hasher("scrypt")
decoded = hasher.decode(encoded)
tests = [
("block_size", hasher.block_size),
("parallelism", hasher.parallelism),
("salt", "seasalt"),
("work_factor", hasher.work_factor),
]
for key, excepted in tests:
with self.subTest(key=key):
self.assertEqual(decoded[key], excepted)
def _test_scrypt_upgrade(self, attr, summary_key, new_value):
hasher = get_hasher("scrypt")
self.assertEqual(hasher.algorithm, "scrypt")
self.assertNotEqual(getattr(hasher, attr), new_value)
old_value = getattr(hasher, attr)
try:
# Generate hash with attr set to the new value.
setattr(hasher, attr, new_value)
encoded = make_password("lètmein", "seasalt", "scrypt")
attr_value = hasher.safe_summary(encoded)[summary_key]
self.assertEqual(attr_value, new_value)
state = {"upgraded": False}
def setter(password):
state["upgraded"] = True
# No update is triggered.
self.assertIs(check_password("lètmein", encoded, setter, "scrypt"), True)
self.assertIs(state["upgraded"], False)
# Revert to the old value.
setattr(hasher, attr, old_value)
# Password is updated.
self.assertIs(check_password("lètmein", encoded, setter, "scrypt"), True)
self.assertIs(state["upgraded"], True)
finally:
setattr(hasher, attr, old_value)
def test_scrypt_upgrade(self):
tests = [
("work_factor", "work factor", 2**11),
("block_size", "block size", 10),
("parallelism", "parallelism", 2),
]
for attr, summary_key, new_value in tests:
with self.subTest(attr=attr):
self._test_scrypt_upgrade(attr, summary_key, new_value)
| TestUtilsHashPassScrypt |
python | google__pytype | build_scripts/build_utils.py | {
"start": 5329,
"end": 8774
} | class ____:
"""A class to collect failures."""
def __init__(self):
self._failures = []
def add_failure(self, mod_name, log_file):
self._failures.append((mod_name, log_file))
def print_report(self, verbose):
num_failures = len(self._failures)
if num_failures == 0:
return
print("\n%d test module(s) failed: \n" % num_failures)
for mod_name, log_file in self._failures:
msg = f"** {mod_name}"
if log_file:
msg += f" - {log_file}"
print(msg)
if log_file and verbose:
with open(log_file.strip()) as f:
print(f.read(), file=sys.stderr)
def run_ninja(targets, fail_collector=None, fail_fast=False, verbose=False):
"""Run ninja over the list of specified targets.
Arguments:
targets: The list of targets to run.
fail_collector: A FailCollector object to collect failures.
fail_fast: If True, abort at the first target failure.
verbose: If True, print verbose output.
Returns:
True if no target fails. False, otherwise.
"""
# The -k option to ninja, set to a very high value, makes it run until it
# detects all failures. So, we set it to a high value unless |fail_fast| is
# True.
cmd = ["ninja", "-k", "1" if fail_fast else "100000"] + targets
with subprocess.Popen(
cmd,
cwd=OUT_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
) as process:
failed_targets = []
# When verbose output is requested, test failure logs are printed to stderr.
# However, sometimes a test fails without generating a log, in which case we
# need to print the ninja build output to see what happened.
print_if_verbose = False
with open(NINJA_LOG, "w") as ninja_log:
while True:
line = process.stdout.readline()
if not line:
break
ninja_log.write(line)
msg_type, modname, logfile = parse_ninja_output_line(line)
if msg_type == _NINJA_FAILURE_MSG:
# This is a failed ninja target.
failed_targets.append(line[len(NINJA_FAILURE_PREFIX) :].strip())
print_if_verbose = True
if (
msg_type == _TEST_MODULE_PASS_MSG
or msg_type == _TEST_MODULE_FAIL_MSG
):
print(line)
if msg_type == _TEST_MODULE_FAIL_MSG:
fail_collector.add_failure(modname, logfile)
print_if_verbose = False
if verbose and print_if_verbose:
print(line.rstrip())
if failed_targets:
# For convenience, we will print the list of failed targets.
summary_hdr = (
">>> Found Ninja target failures (includes test failures):"
)
print("\n" + summary_hdr)
ninja_log.write("\n" + summary_hdr + "\n")
for t in failed_targets:
target = f" - {t}"
print(target)
ninja_log.write(target + "\n")
process.wait()
if process.returncode == 0:
return True
else:
# Ninja output can be a lot. Printing it here will clutter the output of
# this script. So, just tell the user how to repro the error.
print(f">>> FAILED: Ninja command '{shlex.join(cmd)}'.")
print(">>> Run it in the 'out' directory to reproduce.")
print(f">>> Full Ninja output is available in '{NINJA_LOG}'.")
print(">>> Failing test modules (if any) will be reported below.")
return False
| FailCollector |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 25490,
"end": 31068
} | class ____:
"""Integer array selection against a single dimension."""
dim_len: int
dim_chunk_len: int
nchunks: int
nitems: int
order: Order
dim_sel: npt.NDArray[np.intp]
dim_out_sel: npt.NDArray[np.intp]
chunk_nitems: int
dim_chunk_ixs: npt.NDArray[np.intp]
chunk_nitems_cumsum: npt.NDArray[np.intp]
def __init__(
self,
dim_sel: npt.NDArray[np.intp],
dim_len: int,
dim_chunk_len: int,
wraparound: bool = True,
boundscheck: bool = True,
order: Order = Order.UNKNOWN,
) -> None:
# ensure 1d array
dim_sel = np.asanyarray(dim_sel)
if not is_integer_array(dim_sel, 1):
raise IndexError("integer arrays in an orthogonal selection must be 1-dimensional only")
nitems = len(dim_sel)
nchunks = ceildiv(dim_len, dim_chunk_len)
# handle wraparound
if wraparound:
wraparound_indices(dim_sel, dim_len)
# handle out of bounds
if boundscheck:
boundscheck_indices(dim_sel, dim_len)
# determine which chunk is needed for each selection item
# note: for dense integer selections, the division operation here is the
# bottleneck
dim_sel_chunk = dim_sel // dim_chunk_len
# determine order of indices
if order == Order.UNKNOWN:
order = Order.check(dim_sel)
order = Order(order)
if order == Order.INCREASING:
dim_out_sel = None
elif order == Order.DECREASING:
dim_sel = dim_sel[::-1]
# TODO should be possible to do this without creating an arange
dim_out_sel = np.arange(nitems - 1, -1, -1)
else:
# sort indices to group by chunk
dim_out_sel = np.argsort(dim_sel_chunk)
dim_sel = np.take(dim_sel, dim_out_sel)
# precompute number of selected items for each chunk
chunk_nitems = np.bincount(dim_sel_chunk, minlength=nchunks)
# find chunks that we need to visit
dim_chunk_ixs = np.nonzero(chunk_nitems)[0]
# compute offsets into the output array
chunk_nitems_cumsum = np.cumsum(chunk_nitems)
# store attributes
object.__setattr__(self, "dim_len", dim_len)
object.__setattr__(self, "dim_chunk_len", dim_chunk_len)
object.__setattr__(self, "nchunks", nchunks)
object.__setattr__(self, "nitems", nitems)
object.__setattr__(self, "order", order)
object.__setattr__(self, "dim_sel", dim_sel)
object.__setattr__(self, "dim_out_sel", dim_out_sel)
object.__setattr__(self, "chunk_nitems", chunk_nitems)
object.__setattr__(self, "dim_chunk_ixs", dim_chunk_ixs)
object.__setattr__(self, "chunk_nitems_cumsum", chunk_nitems_cumsum)
def __iter__(self) -> Iterator[ChunkDimProjection]:
for dim_chunk_ix in self.dim_chunk_ixs:
dim_out_sel: slice | npt.NDArray[np.intp]
# find region in output
if dim_chunk_ix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[dim_chunk_ix - 1]
stop = self.chunk_nitems_cumsum[dim_chunk_ix]
if self.order == Order.INCREASING:
dim_out_sel = slice(start, stop)
else:
dim_out_sel = self.dim_out_sel[start:stop]
# find region in chunk
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel[start:stop] - dim_offset
is_complete_chunk = False # TODO
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel, is_complete_chunk)
def slice_to_range(s: slice, length: int) -> range:
return range(*s.indices(length))
def ix_(selection: Any, shape: tuple[int, ...]) -> npt.NDArray[np.intp]:
"""Convert an orthogonal selection to a numpy advanced (fancy) selection, like ``numpy.ix_``
but with support for slices and single ints."""
# normalisation
selection = replace_ellipsis(selection, shape)
# replace slice and int as these are not supported by numpy.ix_
selection = [
slice_to_range(dim_sel, dim_len)
if isinstance(dim_sel, slice)
else [dim_sel]
if is_integer(dim_sel)
else dim_sel
for dim_sel, dim_len in zip(selection, shape, strict=True)
]
# now get numpy to convert to a coordinate selection
selection = np.ix_(*selection)
return cast("npt.NDArray[np.intp]", selection)
def oindex(a: npt.NDArray[Any], selection: Selection) -> npt.NDArray[Any]:
"""Implementation of orthogonal indexing with slices and ints."""
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple(i for i, s in enumerate(selection) if is_integer(s))
selection = ix_(selection, a.shape)
result = a[selection]
if drop_axes:
result = result.squeeze(axis=drop_axes)
return result
def oindex_set(a: npt.NDArray[Any], selection: Selection, value: Any) -> None:
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple(i for i, s in enumerate(selection) if is_integer(s))
selection = ix_(selection, a.shape)
if not np.isscalar(value) and drop_axes:
value = np.asanyarray(value)
value_selection: list[Selector | None] = [slice(None)] * len(a.shape)
for i in drop_axes:
value_selection[i] = np.newaxis
value = value[tuple(value_selection)]
a[selection] = value
@dataclass(frozen=True)
| IntArrayDimIndexer |
python | kamyu104__LeetCode-Solutions | Python/cat-and-mouse.py | {
"start": 54,
"end": 1767
} | class ____(object):
def catMouseGame(self, graph):
"""
:type graph: List[List[int]]
:rtype: int
"""
HOLE, MOUSE_START, CAT_START = range(3)
DRAW, MOUSE, CAT = range(3)
def parents(m, c, t):
if t == CAT:
for nm in graph[m]:
yield nm, c, MOUSE^CAT^t
else:
for nc in graph[c]:
if nc != HOLE:
yield m, nc, MOUSE^CAT^t
degree = {}
ignore = set(graph[HOLE])
for m in xrange(len(graph)):
for c in xrange(len(graph)):
degree[m, c, MOUSE] = len(graph[m])
degree[m, c, CAT] = len(graph[c])-(c in ignore)
color = collections.defaultdict(int)
q = collections.deque()
for i in xrange(len(graph)):
if i == HOLE:
continue
color[HOLE, i, CAT] = MOUSE
q.append((HOLE, i, CAT, MOUSE))
for t in [MOUSE, CAT]:
color[i, i, t] = CAT
q.append((i, i, t, CAT))
while q:
i, j, t, c = q.popleft()
for ni, nj, nt in parents(i, j, t):
if color[ni, nj, nt] != DRAW:
continue
if nt == c:
color[ni, nj, nt] = c
q.append((ni, nj, nt, c))
continue
degree[ni, nj, nt] -= 1
if not degree[ni, nj, nt]:
color[ni, nj, nt] = c
q.append((ni, nj, nt, c))
return color[MOUSE_START, CAT_START, MOUSE]
# Time: O(n^3)
# Space: O(n^2)
import collections
| Solution |
python | etianen__django-reversion | tests/test_app/tests/test_commands.py | {
"start": 3773,
"end": 4021
} | class ____(TestModelMixin, TestBase):
def testDeleteRevisions(self):
with reversion.create_revision():
TestModel.objects.create()
self.callCommand("deleterevisions")
self.assertNoRevision()
| DeleteRevisionsTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/executor/multiprocess.py | {
"start": 2321,
"end": 4783
} | class ____(ChildProcessCommand):
def __init__(
self,
run_config: Mapping[str, object],
dagster_run: "DagsterRun",
step_key: str,
instance_ref: "InstanceRef",
term_event: Any,
recon_pipeline: ReconstructableJob,
retry_mode: RetryMode,
known_state: Optional[KnownExecutionState],
repository_load_data: Optional[RepositoryLoadData],
):
self.run_config = run_config
self.dagster_run = dagster_run
self.step_key = step_key
self.instance_ref = instance_ref
self.term_event = term_event
self.recon_pipeline = recon_pipeline
self.retry_mode = retry_mode
self.known_state = known_state
self.repository_load_data = repository_load_data
def execute(self) -> Iterator[DagsterEvent]:
recon_job = self.recon_pipeline
with DagsterInstance.from_ref(self.instance_ref) as instance:
done_event = threading.Event()
start_termination_thread(self.term_event, done_event)
try:
log_manager = create_context_free_log_manager(instance, self.dagster_run)
yield DagsterEvent.step_worker_started(
log_manager,
self.dagster_run.job_name,
message=f'Executing step "{self.step_key}" in subprocess.',
metadata={
"pid": MetadataValue.text(str(os.getpid())),
},
step_key=self.step_key,
)
execution_plan = create_execution_plan(
job=recon_job,
run_config=self.run_config,
step_keys_to_execute=[self.step_key],
known_state=self.known_state,
repository_load_data=self.repository_load_data,
)
yield from execute_plan_iterator(
execution_plan,
recon_job,
self.dagster_run,
run_config=self.run_config,
retry_mode=self.retry_mode.for_inner_plan(),
instance=instance,
)
finally:
# set events to stop the termination thread on exit
done_event.set() # waiting on term_event so set done first
self.term_event.set()
| MultiprocessExecutorChildProcessCommand |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 16193,
"end": 16475
} | class ____(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return Piecewise(
((chebyshevt(n + 1, x)/(n + 1) -
chebyshevt(n - 1, x)/(n - 1))/2, Ne(Abs(n), 1)),
(x**2/2, True))
@dataclass
| ChebyshevTRule |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 3842,
"end": 11374
} | class ____:
NoHostPort = "connection configuration must contain 'host' and 'port'."
HostType = "Type of 'host' must be str."
PortType = "Type of 'port' must be str or int."
ConnDiffConf = (
"Alias of %r already creating connections, "
"but the configure is not the same as passed in."
)
AliasType = "Alias should be string, but %r is given."
ConnLackConf = "You need to pass in the configuration of the connection named %r ."
ConnectFirst = "should create connection first."
CollectionNotExistNoSchema = "Collection %r not exist, or you can pass in schema to create one."
NoSchema = "Should be passed into the schema."
EmptySchema = "The field of the schema cannot be empty."
SchemaType = "Schema type must be schema.CollectionSchema."
SchemaInconsistent = (
"The collection already exist, but the schema is not the same as the schema passed in."
)
AutoIDWithData = "Auto_id is True, primary field should not have data."
AutoIDType = "Param auto_id must be bool type."
NumPartitionsType = "Param num_partitions must be int type."
AutoIDInconsistent = (
"The auto_id of the collection is inconsistent "
"with the auto_id of the primary key field."
)
AutoIDIllegalRanges = "The auto-generated id ranges should be pairs."
ConsistencyLevelInconsistent = (
"The parameter consistency_level is inconsistent with that of existed collection."
)
AutoIDOnlyOnPK = "The auto_id can only be specified on the primary key field"
AutoIDFieldType = (
"The auto_id can only be specified on field with DataType.INT64 and DataType.VARCHAR."
)
NumberRowsInvalid = "Must pass in at least one column"
FieldsNumInconsistent = "The data fields number is not match with schema."
NoVector = "No vector field is found."
NoneDataFrame = "Dataframe can not be None."
DataFrameType = "Data type must be pandas.DataFrame."
NoPrimaryKey = "Schema must have a primary key field."
PrimaryKeyNotExist = "Primary field must in dataframe."
PrimaryKeyOnlyOne = "Expected only one primary key field, got [%s, %s, ...]."
PartitionKeyOnlyOne = "Expected only one partition key field, got [%s, %s, ...]."
PrimaryKeyType = "Primary key type must be DataType.INT64 or DataType.VARCHAR."
PartitionKeyType = "Partition key field type must be DataType.INT64 or DataType.VARCHAR."
PartitionKeyNotPrimary = "Partition key field should not be primary field"
IsPrimaryType = "Param is_primary must be bool type."
PrimaryFieldType = "Param primary_field must be int or str type."
PartitionKeyFieldType = "Param partition_key_field must be str type."
PartitionKeyFieldNotExist = "the specified partition key field {%s} not exist"
IsPartitionKeyType = "Param is_partition_key must be bool type."
DataTypeInconsistent = (
"The Input data type is inconsistent with defined schema, please check it."
)
FieldDataInconsistent = "The Input data type is inconsistent with defined schema, {%s} field should be a %s, but got a {%s} instead."
DataTypeNotSupport = "Data type is not support."
DataLengthsInconsistent = "Arrays must all be same length."
DataFrameInvalid = "Cannot infer schema from empty dataframe."
NdArrayNotSupport = "Data type not support numpy.ndarray."
TypeOfDataAndSchemaInconsistent = "The types of schema and data do not match."
PartitionAlreadyExist = "Partition already exist."
IndexNotExist = "Index doesn't exist."
CollectionType = "The type of collection must be pymilvus.Collection."
FieldsType = "The fields of schema must be type list."
FunctionsType = "The functions of collection must be type list."
FunctionIncorrectInputOutputType = "The type of function input and output must be str."
FunctionInvalidOutputField = (
"The output field must not be primary key, partition key, clustering key."
)
FunctionDuplicateInputs = "Duplicate input field names are not allowed in function."
FunctionDuplicateOutputs = "Duplicate output field names are not allowed in function."
FunctionCommonInputOutput = "Input and output field names must be different."
BM25FunctionIncorrectInputOutputCount = (
"BM25 function must have exact 1 input and 1 output field."
)
TextEmbeddingFunctionIncorrectInputOutputCount = (
"TextEmbedding function must have exact 1 input and 1 output field."
)
TextEmbeddingFunctionIncorrectInputFieldType = (
"TextEmbedding function input field must be VARCHAR."
)
TextEmbeddingFunctionIncorrectOutputFieldType = (
"TextEmbedding function output field must be FLOAT_VECTOR or INT8_VECTOR."
)
BM25FunctionIncorrectInputFieldType = "BM25 function input field must be VARCHAR."
BM25FunctionIncorrectOutputFieldType = "BM25 function output field must be SPARSE_FLOAT_VECTOR."
FunctionMissingInputField = "Function input field not found in collection schema."
FunctionMissingOutputField = "Function output field not found in collection schema."
UnknownFunctionType = "Unknown function type."
FunctionIncorrectType = "The function of schema type must be Function."
FieldType = "The field of schema type must be FieldSchema."
FieldDtype = "Field dtype must be of DataType"
ExprType = "The type of expr must be string ,but %r is given."
EnvConfigErr = "Environment variable %s has a wrong format, please check it: %s"
AmbiguousIndexName = "There are multiple indexes, please specify the index_name."
InsertUnexpectedField = (
"Attempt to insert an unexpected field `%s` to collection without enabling dynamic field"
)
InsertUnexpectedFunctionOutputField = (
"Attempt to insert an unexpected function output field `%s` to collection"
)
InsertMissedField = (
"Insert missed an field `%s` to collection without set nullable==true or set default_value"
)
InsertFieldsLenInconsistent = (
"The data fields length is inconsistent. previous length is %d, current length is %d"
)
UpsertAutoIDTrue = "Upsert don't support autoid == true"
AmbiguousDeleteFilterParam = (
"Ambiguous filter parameter, only one deletion condition can be specified."
)
AmbiguousQueryFilterParam = (
"Ambiguous parameter, either ids or filter should be specified, cannot support both."
)
JSONKeyMustBeStr = "JSON key must be str."
ClusteringKeyType = (
"Clustering key field type must be DataType.INT8, DataType.INT16, "
"DataType.INT32, DataType.INT64, DataType.FLOAT, DataType.DOUBLE, "
"DataType.VARCHAR, DataType.FLOAT_VECTOR."
)
ClusteringKeyFieldNotExist = "the specified clustering key field {%s} not exist"
ClusteringKeyOnlyOne = "Expected only one clustering key field, got [%s, %s, ...]."
IsClusteringKeyType = "Param is_clustering_key must be bool type."
ClusteringKeyFieldType = "Param clustering_key_field must be str type."
UpsertPrimaryKeyEmpty = "Upsert need to assign pk."
DefaultValueInvalid = (
"Default value cannot be None for a field that is defined as nullable == false."
)
SearchIteratorV2FallbackWarning = """
The server does not support Search Iterator V2. The search_iterator (v1) is used instead.
Please upgrade your Milvus server version to 2.5.2 and later,
or use a pymilvus version before 2.5.3 (excluded) to avoid this issue.
"""
| ExceptionsMessage |
python | pytorch__pytorch | test/dynamo/test_reconstruct.py | {
"start": 495,
"end": 14056
} | class ____(torch._dynamo.test_case.TestCase):
@contextlib.contextmanager
def register_bytecode_hook(self, fn):
def hook(code, out_code):
fn(list(dis.get_instructions(out_code)))
return None
torch._dynamo.reset()
handle = torch._dynamo.convert_frame.register_bytecode_hook(hook)
try:
yield
finally:
handle.remove()
def test_ConstDict_optimize_reconstruct(self):
"""
Emit code to reconstruct only the key that changed
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
# reconstruct only d[40]
self.assertEqual(build_map[0].argval, 1)
def f(d, t):
d[40] = t + 1
t = torch.randn(3, 4)
d = {1: t}
d_opt = d.copy()
f(d, t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_ConstDict_pop_reconstruct(self):
"""
If something is pop'ed from the dict, we reconstruct everything
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
# reconstruct everything
self.assertEqual(build_map[0].argval, 2)
def f(d, t):
d.pop(2)
d[40] = t + 1
t = torch.randn(3, 4)
d = {1: t, 2: t + 1}
d_opt = d.copy()
f(d, t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_ConstDict_popitem_reconstruct(self):
"""
If something is pop'ed from the dict, we reconstruct everything
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
# reconstruct everything
self.assertEqual(build_map[0].argval, 1)
def f(d, t):
d.popitem()
t = torch.randn(3, 4)
d = {1: t, 2: t + 1}
d_opt = d.copy()
f(d, t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_ConstDict_popitem_reconstruct_graph_break(self):
"""
If something is pop'ed from the dict, we reconstruct everything.
Calling dict.popitem will graph break.
"""
def f(d, t):
d.popitem()
t = torch.randn(3, 4)
d = {1: t, 2: t + 1}
d_opt = d.copy()
f(d, t)
opt_f = torch.compile(backend="eager")(f)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_ConstDict_del_reconstruct(self):
"""
If something is deleted from the dict, we reconstruct everything
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
# reconstruct everything
self.assertEqual(build_map[0].argval, 2)
def f(d, t):
del d[2]
d[40] = t + 1
t = torch.randn(3, 4)
d = {1: t, 2: t + 1}
d_opt = d.copy()
f(d, t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_ConstDict_get_reconstruct(self):
"""
dict.get shouldn't affect anything
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
self.assertEqual(build_map[0].argval, 1)
load_const = _filter_instructions(instructions, "LOAD_CONST")
self.assertNotIn(123, load_const)
def f(d, t):
d[456] = d.get(456) + t
t = torch.randn(3, 4)
d = {123: t, 456: t + 1}
d_opt = d.copy()
f(d, t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_ConstDict_clear_reconstruct(self):
"""
If dict.clear() is used, we reconstruct everything
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
# reconstruct everything
self.assertEqual(build_map[0].argval, 1)
def f(d, t):
d.clear()
d[3] = t + 3
t = torch.randn(3, 4)
d = {1: t, 2: t + 1}
d_opt = d.copy()
f(d, t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
opt_f(d_opt, t)
self.assertEqual(d, d_opt)
def test_create_dict_reconstruct(self):
"""
If dict is created inside a function, everything needs to be reconstructed
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
self.assertEqual(len(build_map), 1)
# reconstruct everything
self.assertEqual(build_map[0].argval, 2)
def f(t):
return {1: t, 2: t + 1}
t = torch.randn(3, 4)
d = f(t)
with self.register_bytecode_hook(hook):
opt_f = torch.compile(f, backend="eager", fullgraph=True)
d_opt = opt_f(t)
self.assertEqual(d, d_opt)
@unittest.skipIf(
IS_FBCODE, "capturing functional_call is not enabled by default in FB_CODE"
)
def test_functional_call_reconstruct(self):
"""
PyTorch shouldn't codegen any key/value when functional_call is used
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
# don't reconstruct anything
self.assertEqual(len(build_map), 0)
m = torch.nn.Linear(3, 3)
new_bias = torch.randn(3)
new_weight = torch.randn(3, 3)
def fn(new_weight, new_bias, x):
return torch.func.functional_call(
m, {"weight": new_weight, "bias": new_bias}, x
)
x = torch.randn(2, 3)
expected = torch.nn.functional.linear(x, new_weight, new_bias)
with self.register_bytecode_hook(hook):
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
got = opt_fn(new_weight, new_bias, x)
self.assertEqual(expected, got)
@unittest.skipIf(
IS_FBCODE, "capturing functional_call is not enabled by default in FB_CODE"
)
def test_functional_call_reconstruct_2(self):
"""
PyTorch shouldn't codegen any key/value when functional_call is used
"""
def hook(instructions: list[dis.Instruction]):
build_map = _filter_instructions(instructions, "BUILD_MAP")
# don't reconstruct anything
self.assertEqual(len(build_map), 0)
class DummyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.nn.ModuleDict(
{
"b": torch.nn.ModuleDict(
{
"c": torch.nn.ModuleDict(
{
"d": torch.nn.ModuleDict(
{"e": torch.nn.Linear(10, 10, bias=False)}
)
}
)
}
)
}
)
def forward(self, x):
return self.a.b.c.d.e(x)
model = DummyModule()
def fn(model, states, x):
return torch.func.functional_call(model, states, x)
x = torch.randn(2, 3)
states = model.state_dict()
x = torch.randn(10, 10)
expected = fn(model, states, x)
with self.register_bytecode_hook(hook):
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
got = opt_fn(model, states, x)
self.assertEqual(expected, got)
def test_graph_break_in_wrapped_user_function(self):
def fn(x):
x = x + 1
torch._dynamo.graph_break()
assert torch.compiler.is_compiling()
assert not torch.is_grad_enabled()
return x + 2
@torch.compile(backend="eager")
def gn(x):
x = torch.no_grad()(fn)(x)
# reconstruction failure would cause a skipped frame
assert torch.compiler.is_compiling()
assert torch.is_grad_enabled()
return x
inp = torch.randn(3)
self.assertEqual(gn(inp), inp + 3)
def test_graph_break_in_wrapped_user_method(self):
class Foo:
def __init__(self):
self.a = 1
self.b = 2
def fn(self, x):
x = x + self.a
torch._dynamo.graph_break()
assert torch.compiler.is_compiling()
assert not torch.is_grad_enabled()
return x + self.b
obj = Foo()
@torch.compile(backend="eager")
def gn(x):
obj.fn = torch.no_grad()(obj.fn)
x = obj.fn(x)
# reconstruction failure would cause a skipped frame
assert torch.compiler.is_compiling()
assert torch.is_grad_enabled()
return x
inp = torch.randn(3)
self.assertEqual(gn(inp), inp + 3)
def test_graph_break_in_wrapped_nested_function(self):
@torch.compile(backend="eager")
def gn(x):
a = 1
b = 2
@torch.no_grad()
def fn(x):
x = x + a
torch._dynamo.graph_break()
assert torch.compiler.is_compiling()
assert not torch.is_grad_enabled()
return x + b
x = fn(x)
# reconstruction failure would cause a skipped frame
assert torch.compiler.is_compiling()
assert torch.is_grad_enabled()
return x
inp = torch.randn(3)
self.assertEqual(gn(inp), inp + 3)
def test_graph_break_in_wrapped_skipped_function(self):
from torch._dynamo import trace_rules
from torch._dynamo.testing import _skipped_function_for_test_reconstruct
from torch._dynamo.variables import SkipFunctionVariable
self.assertIs(
trace_rules.lookup(_skipped_function_for_test_reconstruct),
SkipFunctionVariable,
)
def fn(x):
x = x + 1
torch._dynamo.graph_break()
assert torch.compiler.is_compiling()
assert not torch.is_grad_enabled()
return x + 2
@torch.compile(backend="eager")
def gn(x):
x = torch.no_grad()(_skipped_function_for_test_reconstruct)(fn, x)
# reconstruction failure would cause a skipped frame
assert torch.compiler.is_compiling()
assert torch.is_grad_enabled()
return x
inp = torch.randn(3)
self.assertEqual(gn(inp), inp + 3)
@requires_triton()
@unittest.skipIf(
not has_triton_experimental_host_tma(),
"Test requires triton.tools.experimental_descriptor API",
)
def test_tma_experimental_reconstruct(self):
import triton
def create_tma(tensor):
tma = triton.tools.experimental_descriptor.create_2d_tma_descriptor(
tensor.data_ptr(),
tensor.size(0),
tensor.size(1),
32,
32,
tensor.element_size(),
)
return tensor + 1, tma
x = torch.randn(128, 128, device=GPU_TYPE)
ref = create_tma(x)
res = torch.compile(create_tma, backend="eager")(x)
self.assertEqual(ref[1].desc, res[1].desc)
@requires_triton()
@unittest.skipIf(
not has_triton_tensor_descriptor_host_tma(),
"Test requires triton.tools.tensor_descriptor API",
)
def test_tma_stable_reconstruct(self):
import triton
def create_tma(tensor):
tma = triton.tools.tensor_descriptor.TensorDescriptor.from_tensor(
tensor,
[32, 32],
)
return tensor + 1, tma
x = torch.randn(128, 128, device=GPU_TYPE)
ref = create_tma(x)
res = torch.compile(create_tma, backend="eager")(x)
self.assertEqual(ref, res)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| ReconstructTest |
python | dask__distributed | distributed/versions.py | {
"start": 4422,
"end": 4512
} | class ____(Warning):
"""Indicates version mismatch between nodes"""
| VersionMismatchWarning |
python | vyperlang__vyper | vyper/evm/assembler/instructions.py | {
"start": 3087,
"end": 3565
} | class ____:
def __init__(self, label: Label):
assert isinstance(label, Label), label
self.label = label
def __repr__(self):
return f"PUSHLABEL {self.label.label}"
def __eq__(self, other):
if not isinstance(other, PUSHLABEL):
return False
return self.label == other.label
def __hash__(self):
return hash(self.label)
# push the result of an addition (which might be resolvable at compile-time)
| PUSHLABEL |
python | getsentry__sentry | tests/sentry/rules/conditions/test_reappeared_event.py | {
"start": 209,
"end": 513
} | class ____(RuleTestCase):
rule_cls = ReappearedEventCondition
def test_applies_correctly(self) -> None:
rule = self.get_rule()
self.assertPasses(rule, self.event, has_escalated=True)
self.assertDoesNotPass(rule, self.event, has_escalated=False)
| ReappearedEventConditionTest |
python | plotly__plotly.py | plotly/graph_objs/_surface.py | {
"start": 215,
"end": 79001
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "surface"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"coloraxis",
"colorbar",
"colorscale",
"connectgaps",
"contours",
"customdata",
"customdatasrc",
"hidesurface",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"lighting",
"lightposition",
"meta",
"metasrc",
"name",
"opacity",
"opacityscale",
"reversescale",
"scene",
"showlegend",
"showscale",
"stream",
"surfacecolor",
"surfacecolorsrc",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"visible",
"x",
"xcalendar",
"xhoverformat",
"xsrc",
"y",
"ycalendar",
"yhoverformat",
"ysrc",
"z",
"zcalendar",
"zhoverformat",
"zsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here z or surfacecolor) or the
bounds set in `cmin` and `cmax` Defaults to `false` when `cmin`
and `cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as z or surfacecolor and if set, `cmin` must be set
as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as z or surfacecolor. Has no effect when `cauto` is
`false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as z or surfacecolor and if set, `cmax` must be set
as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.surface.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the `z` data are filled in.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
@property
def contours(self):
"""
The 'contours' property is an instance of Contours
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Contours`
- A dict of string/value properties that will be passed
to the Contours constructor
Returns
-------
plotly.graph_objs.surface.Contours
"""
return self["contours"]
@contours.setter
def contours(self, val):
self["contours"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def hidesurface(self):
"""
Determines whether or not a surface is drawn. For example, set
`hidesurface` to False `contours.x.show` to True and
`contours.y.show` to True to draw a wire frame plot.
The 'hidesurface' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["hidesurface"]
@hidesurface.setter
def hidesurface(self, val):
self["hidesurface"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.surface.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example `<extra>%{fullData.name}</extra>`.
To hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.surface.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def lighting(self):
"""
The 'lighting' property is an instance of Lighting
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Lighting`
- A dict of string/value properties that will be passed
to the Lighting constructor
Returns
-------
plotly.graph_objs.surface.Lighting
"""
return self["lighting"]
@lighting.setter
def lighting(self, val):
self["lighting"] = val
@property
def lightposition(self):
"""
The 'lightposition' property is an instance of Lightposition
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Lightposition`
- A dict of string/value properties that will be passed
to the Lightposition constructor
Returns
-------
plotly.graph_objs.surface.Lightposition
"""
return self["lightposition"]
@lightposition.setter
def lightposition(self, val):
self["lightposition"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the surface. Please note that in the case
of using high `opacity` values for example a value greater than
or equal to 0.5 on two surfaces (and 0.25 with four surfaces),
an overlay of multiple transparent surfaces may not perfectly
be sorted in depth by the webgl API. This behavior may be
improved in the near future and is subject to change.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacityscale(self):
"""
Sets the opacityscale. The opacityscale must be an array
containing arrays mapping a normalized value to an opacity
value. At minimum, a mapping for the lowest (0) and highest (1)
values are required. For example, `[[0, 1], [0.5, 0.2], [1,
1]]` means that higher/lower values would have higher opacity
values and those in the middle would be more transparent
Alternatively, `opacityscale` may be a palette name string of
the following list: 'min', 'max', 'extremes' and 'uniform'. The
default is 'uniform'.
The 'opacityscale' property accepts values of any type
Returns
-------
Any
"""
return self["opacityscale"]
@opacityscale.setter
def opacityscale(self, val):
self["opacityscale"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `cmin` will
correspond to the last color in the array and `cmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def scene(self):
"""
Sets a reference between this trace's 3D coordinate system and
a 3D scene. If "scene" (the default value), the (x,y,z)
coordinates refer to `layout.scene`. If "scene2", the (x,y,z)
coordinates refer to `layout.scene2`, and so on.
The 'scene' property is an identifier of a particular
subplot, of type 'scene', that may be specified as the string 'scene'
optionally followed by an integer >= 1
(e.g. 'scene', 'scene1', 'scene2', 'scene3', etc.)
Returns
-------
str
"""
return self["scene"]
@scene.setter
def scene(self, val):
self["scene"] = val
    @property
    def showlegend(self):
        """
        Determines whether or not an item corresponding to this trace
        is shown in the legend.

        The 'showlegend' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["showlegend"]

    @showlegend.setter
    def showlegend(self, val):
        self["showlegend"] = val
    @property
    def showscale(self):
        """
        Determines whether or not a colorbar is displayed for this
        trace.

        The 'showscale' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["showscale"]

    @showscale.setter
    def showscale(self, val):
        self["showscale"] = val
    @property
    def stream(self):
        """
        The 'stream' property is an instance of Stream
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.surface.Stream`
          - A dict of string/value properties that will be passed
            to the Stream constructor

        Returns
        -------
        plotly.graph_objs.surface.Stream
        """
        return self["stream"]

    @stream.setter
    def stream(self, val):
        self["stream"] = val
    @property
    def surfacecolor(self):
        """
        Sets the surface color values, used for setting a color scale
        independent of `z`.

        The 'surfacecolor' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["surfacecolor"]

    @surfacecolor.setter
    def surfacecolor(self, val):
        self["surfacecolor"] = val
    @property
    def surfacecolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `surfacecolor`.

        The 'surfacecolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["surfacecolorsrc"]

    @surfacecolorsrc.setter
    def surfacecolorsrc(self, val):
        self["surfacecolorsrc"] = val
    @property
    def text(self):
        """
        Sets the text elements associated with each z value. If trace
        `hoverinfo` contains a "text" flag and "hovertext" is not set,
        these elements will be seen in the hover labels.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val
    @property
    def textsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `text`.

        The 'textsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["textsrc"]

    @textsrc.setter
    def textsrc(self, val):
        self["textsrc"] = val
    @property
    def uid(self):
        """
        Assign an id to this trace. Use this to provide object
        constancy between traces during animations and transitions.

        The 'uid' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["uid"]

    @uid.setter
    def uid(self, val):
        self["uid"] = val
    @property
    def uirevision(self):
        """
        Controls persistence of some user-driven changes to the trace:
        `constraintrange` in `parcoords` traces, as well as some
        `editable: true` modifications such as `name` and
        `colorbar.title`. Defaults to `layout.uirevision`. Note that
        other user-driven trace attribute changes are controlled by
        `layout` attributes: `trace.visible` is controlled by
        `layout.legend.uirevision`, `selectedpoints` is controlled by
        `layout.selectionrevision`, and `colorbar.(x|y)` (accessible
        with `config: {editable: true}`) is controlled by
        `layout.editrevision`. Trace changes are tracked by `uid`,
        which only falls back on trace index if no `uid` is provided.
        So if your app can add/remove traces before the end of the
        `data` array, such that the same trace has a different index,
        you can still preserve user-driven changes if you give each
        trace a `uid` that stays with it as it moves.

        The 'uirevision' property accepts values of any type

        Returns
        -------
        Any
        """
        return self["uirevision"]

    @uirevision.setter
    def uirevision(self, val):
        self["uirevision"] = val
    @property
    def visible(self):
        """
        Determines whether or not this trace is visible. If
        "legendonly", the trace is not drawn, but can appear as a
        legend item (provided that the legend itself is visible).

        The 'visible' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                [True, False, 'legendonly']

        Returns
        -------
        Any
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val
    @property
    def x(self):
        """
        Sets the x coordinates.

        The 'x' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val
    @property
    def xcalendar(self):
        """
        Sets the calendar system to use with `x` date data.

        The 'xcalendar' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['chinese', 'coptic', 'discworld', 'ethiopian',
                'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
                'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
                'thai', 'ummalqura']

        Returns
        -------
        Any
        """
        return self["xcalendar"]

    @xcalendar.setter
    def xcalendar(self, val):
        self["xcalendar"] = val
    @property
    def xhoverformat(self):
        """
        Sets the hover text formatting rule for `x` using d3 formatting
        mini-languages which are very similar to those in Python. For
        numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
        dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to d3's date
        formatter: "%h" for half of the year as a decimal number as
        well as "%{n}f" for fractional seconds with n digits. For
        example, *2016-10-13 09:15:23.456* with tickformat
        "%H~%M~%S.%2f" would display *09~15~23.46*By default the values
        are formatted using `xaxis.hoverformat`.

        The 'xhoverformat' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["xhoverformat"]

    @xhoverformat.setter
    def xhoverformat(self, val):
        self["xhoverformat"] = val
    @property
    def xsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `x`.

        The 'xsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["xsrc"]

    @xsrc.setter
    def xsrc(self, val):
        self["xsrc"] = val
    @property
    def y(self):
        """
        Sets the y coordinates.

        The 'y' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val
    @property
    def ycalendar(self):
        """
        Sets the calendar system to use with `y` date data.

        The 'ycalendar' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['chinese', 'coptic', 'discworld', 'ethiopian',
                'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
                'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
                'thai', 'ummalqura']

        Returns
        -------
        Any
        """
        return self["ycalendar"]

    @ycalendar.setter
    def ycalendar(self, val):
        self["ycalendar"] = val
    @property
    def yhoverformat(self):
        """
        Sets the hover text formatting rule for `y` using d3 formatting
        mini-languages which are very similar to those in Python. For
        numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
        dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to d3's date
        formatter: "%h" for half of the year as a decimal number as
        well as "%{n}f" for fractional seconds with n digits. For
        example, *2016-10-13 09:15:23.456* with tickformat
        "%H~%M~%S.%2f" would display *09~15~23.46*By default the values
        are formatted using `yaxis.hoverformat`.

        The 'yhoverformat' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["yhoverformat"]

    @yhoverformat.setter
    def yhoverformat(self, val):
        self["yhoverformat"] = val
    @property
    def ysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `y`.

        The 'ysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["ysrc"]

    @ysrc.setter
    def ysrc(self, val):
        self["ysrc"] = val
    @property
    def z(self):
        """
        Sets the z coordinates.

        The 'z' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["z"]

    @z.setter
    def z(self, val):
        self["z"] = val
    @property
    def zcalendar(self):
        """
        Sets the calendar system to use with `z` date data.

        The 'zcalendar' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['chinese', 'coptic', 'discworld', 'ethiopian',
                'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
                'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
                'thai', 'ummalqura']

        Returns
        -------
        Any
        """
        return self["zcalendar"]

    @zcalendar.setter
    def zcalendar(self, val):
        self["zcalendar"] = val
    @property
    def zhoverformat(self):
        """
        Sets the hover text formatting rule for `z` using d3 formatting
        mini-languages which are very similar to those in Python. For
        numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
        dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to d3's date
        formatter: "%h" for half of the year as a decimal number as
        well as "%{n}f" for fractional seconds with n digits. For
        example, *2016-10-13 09:15:23.456* with tickformat
        "%H~%M~%S.%2f" would display *09~15~23.46*By default the values
        are formatted using `zaxis.hoverformat`.

        The 'zhoverformat' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["zhoverformat"]

    @zhoverformat.setter
    def zhoverformat(self, val):
        self["zhoverformat"] = val
    @property
    def zsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `z`.

        The 'zsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["zsrc"]

    @zsrc.setter
    def zsrc(self, val):
        self["zsrc"] = val
    @property
    def type(self):
        """
        The trace type string stored under ``_props["type"]``. Unlike
        the other properties, this reads ``_props`` directly (no setter
        is defined), so the value is read-only from this accessor.

        Returns
        -------
        str
        """
        return self._props["type"]
    @property
    def _prop_descriptions(self):
        """
        Plain-text descriptions for every property of this trace.

        Returns
        -------
        str
            Indented ``name``/description pairs. NOTE(review): this is a
            runtime string (presumably spliced into generated help text
            — confirm against the base trace type), so its wording must
            stay in sync with the per-property docstrings above.
        """
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `colorscale`. In case `colorscale` is unspecified or
            `autocolorscale` is true, the default palette will be
            chosen according to whether numbers in the `color`
            array are all positive, all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here z or surfacecolor)
            or the bounds set in `cmin` and `cmax` Defaults to
            `false` when `cmin` and `cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Value should
            have the same units as z or surfacecolor and if set,
            `cmin` must be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `cmin` and/or `cmax` to be equidistant to this point.
            Value should have the same units as z or surfacecolor.
            Has no effect when `cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Value should
            have the same units as z or surfacecolor and if set,
            `cmax` must be set as well.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.surface.ColorBar` instance
            or dict with compatible properties
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `cmin` and `cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody,Bluered,Blues,C
            ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
            and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the `z` data are filled in.
        contours
            :class:`plotly.graph_objects.surface.Contours` instance
            or dict with compatible properties
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        hidesurface
            Determines whether or not a surface is drawn. For
            example, set `hidesurface` to False `contours.x.show`
            to True and `contours.y.show` to True to draw a wire
            frame plot.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.surface.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            `<extra>%{fullData.name}</extra>`. To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.surface.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        lighting
            :class:`plotly.graph_objects.surface.Lighting` instance
            or dict with compatible properties
        lightposition
            :class:`plotly.graph_objects.surface.Lightposition`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the surface. Please note that in
            the case of using high `opacity` values for example a
            value greater than or equal to 0.5 on two surfaces (and
            0.25 with four surfaces), an overlay of multiple
            transparent surfaces may not perfectly be sorted in
            depth by the webgl API. This behavior may be improved
            in the near future and is subject to change.
        opacityscale
            Sets the opacityscale. The opacityscale must be an
            array containing arrays mapping a normalized value to
            an opacity value. At minimum, a mapping for the lowest
            (0) and highest (1) values are required. For example,
            `[[0, 1], [0.5, 0.2], [1, 1]]` means that higher/lower
            values would have higher opacity values and those in
            the middle would be more transparent Alternatively,
            `opacityscale` may be a palette name string of the
            following list: 'min', 'max', 'extremes' and 'uniform'.
            The default is 'uniform'.
        reversescale
            Reverses the color mapping if true. If true, `cmin`
            will correspond to the last color in the array and
            `cmax` will correspond to the first color.
        scene
            Sets a reference between this trace's 3D coordinate
            system and a 3D scene. If "scene" (the default value),
            the (x,y,z) coordinates refer to `layout.scene`. If
            "scene2", the (x,y,z) coordinates refer to
            `layout.scene2`, and so on.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace.
        stream
            :class:`plotly.graph_objects.surface.Stream` instance
            or dict with compatible properties
        surfacecolor
            Sets the surface color values, used for setting a color
            scale independent of `z`.
        surfacecolorsrc
            Sets the source reference on Chart Studio Cloud for
            `surfacecolor`.
        text
            Sets the text elements associated with each z value. If
            trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        x
            Sets the x coordinates.
        xcalendar
            Sets the calendar system to use with `x` date data.
        xhoverformat
            Sets the hover text formatting rulefor `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*By default the values are
            formatted using `xaxis.hoverformat`.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            Sets the y coordinates.
        ycalendar
            Sets the calendar system to use with `y` date data.
        yhoverformat
            Sets the hover text formatting rulefor `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*By default the values are
            formatted using `yaxis.hoverformat`.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        z
            Sets the z coordinates.
        zcalendar
            Sets the calendar system to use with `z` date data.
        zhoverformat
            Sets the hover text formatting rulefor `z` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*By default the values are
            formatted using `zaxis.hoverformat`.
        zsrc
            Sets the source reference on Chart Studio Cloud for
            `z`.
        """
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
coloraxis=None,
colorbar=None,
colorscale=None,
connectgaps=None,
contours=None,
customdata=None,
customdatasrc=None,
hidesurface=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
lighting=None,
lightposition=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
opacityscale=None,
reversescale=None,
scene=None,
showlegend=None,
showscale=None,
stream=None,
surfacecolor=None,
surfacecolorsrc=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xcalendar=None,
xhoverformat=None,
xsrc=None,
y=None,
ycalendar=None,
yhoverformat=None,
ysrc=None,
z=None,
zcalendar=None,
zhoverformat=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Surface object
The data the describes the coordinates of the surface is set in
`z`. Data in `z` should be a 2D list. Coordinates in `x` and
`y` can either be 1D lists or 2D lists (e.g. to graph
parametric surfaces). If not provided in `x` and `y`, the x and
y coordinates are assumed to be linear starting at 0 with a
unit step. The color scale corresponds to the `z` values by
default. For custom color scales, use `surfacecolor` which
should be a 2D list, where its bounds can be controlled using
`cmin` and `cmax`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Surface`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here z or surfacecolor)
or the bounds set in `cmin` and `cmax` Defaults to
`false` when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as z or surfacecolor and if set,
`cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as z or surfacecolor.
Has no effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as z or surfacecolor and if set,
`cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.surface.ColorBar` instance
or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the `z` data are filled in.
contours
:class:`plotly.graph_objects.surface.Contours` instance
or dict with compatible properties
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hidesurface
Determines whether or not a surface is drawn. For
example, set `hidesurface` to False `contours.x.show`
to True and `contours.y.show` to True to draw a wire
frame plot.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.surface.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.surface.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
lighting
:class:`plotly.graph_objects.surface.Lighting` instance
or dict with compatible properties
lightposition
:class:`plotly.graph_objects.surface.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be an
array containing arrays mapping a normalized value to
an opacity value. At minimum, a mapping for the lowest
(0) and highest (1) values are required. For example,
`[[0, 1], [0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and those in
the middle would be more transparent Alternatively,
`opacityscale` may be a palette name string of the
following list: 'min', 'max', 'extremes' and 'uniform'.
The default is 'uniform'.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.surface.Stream` instance
or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting a color
scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud for
`surfacecolor`.
text
Sets the text elements associated with each z value. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date data.
zhoverformat
Sets the hover text formatting rulefor `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Surface
"""
super().__init__("surface")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Surface
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Surface`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("connectgaps", arg, connectgaps)
self._set_property("contours", arg, contours)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("hidesurface", arg, hidesurface)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("lighting", arg, lighting)
self._set_property("lightposition", arg, lightposition)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("opacityscale", arg, opacityscale)
self._set_property("reversescale", arg, reversescale)
self._set_property("scene", arg, scene)
self._set_property("showlegend", arg, showlegend)
self._set_property("showscale", arg, showscale)
self._set_property("stream", arg, stream)
self._set_property("surfacecolor", arg, surfacecolor)
self._set_property("surfacecolorsrc", arg, surfacecolorsrc)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._set_property("x", arg, x)
self._set_property("xcalendar", arg, xcalendar)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xsrc", arg, xsrc)
self._set_property("y", arg, y)
self._set_property("ycalendar", arg, ycalendar)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("ysrc", arg, ysrc)
self._set_property("z", arg, z)
self._set_property("zcalendar", arg, zcalendar)
self._set_property("zhoverformat", arg, zhoverformat)
self._set_property("zsrc", arg, zsrc)
self._props["type"] = "surface"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Surface |
python | python__mypy | mypyc/irbuild/for_helpers.py | {
"start": 44966,
"end": 48489
} | class ____(ForGenerator):
"""Generate IR for a for loop of form `for x, ... in zip(a, ...)`."""
def need_cleanup(self) -> bool:
# The wrapped for loops might need cleanup. We might generate a
# redundant cleanup block, but that's okay.
return True
def init(self, indexes: list[Lvalue], exprs: list[Expression]) -> None:
assert len(indexes) == len(exprs)
# Condition check will require multiple basic blocks, since there will be
# multiple conditions to check.
self.cond_blocks = [BasicBlock() for _ in range(len(indexes) - 1)] + [self.body_block]
self.gens: list[ForGenerator] = []
for index, expr, next_block in zip(indexes, exprs, self.cond_blocks):
gen = make_for_loop_generator(
self.builder, index, expr, next_block, self.loop_exit, self.line, nested=True
)
self.gens.append(gen)
def gen_condition(self) -> None:
for i, gen in enumerate(self.gens):
gen.gen_condition()
if i < len(self.gens) - 1:
self.builder.activate_block(self.cond_blocks[i])
def begin_body(self) -> None:
for gen in self.gens:
gen.begin_body()
def gen_step(self) -> None:
for gen in self.gens:
gen.gen_step()
def gen_cleanup(self) -> None:
for gen in self.gens:
gen.gen_cleanup()
def get_expr_length(builder: IRBuilder, expr: Expression) -> int | None:
if isinstance(expr, (StrExpr, BytesExpr)):
return len(expr.value)
elif isinstance(expr, (ListExpr, TupleExpr)):
# if there are no star expressions, or we know the length of them,
# we know the length of the expression
stars = [get_expr_length(builder, i) for i in expr.items if isinstance(i, StarExpr)]
if None not in stars:
other = sum(not isinstance(i, StarExpr) for i in expr.items)
return other + sum(stars) # type: ignore [arg-type]
elif isinstance(expr, StarExpr):
return get_expr_length(builder, expr.expr)
elif (
isinstance(expr, RefExpr)
and isinstance(expr.node, Var)
and expr.node.is_final
and isinstance(expr.node.final_value, str)
and expr.node.has_explicit_value
):
return len(expr.node.final_value)
# TODO: extend this, passing length of listcomp and genexp should have worthwhile
# performance boost and can be (sometimes) figured out pretty easily. set and dict
# comps *can* be done as well but will need special logic to consider the possibility
# of key conflicts. Range, enumerate, zip are all simple logic.
# we might still be able to get the length directly from the type
rtype = builder.node_type(expr)
if isinstance(rtype, RTuple):
return len(rtype.types)
return None
def get_expr_length_value(
builder: IRBuilder, expr: Expression, expr_reg: Value, line: int, use_pyssize_t: bool
) -> Value:
rtype = builder.node_type(expr)
assert is_sequence_rprimitive(rtype) or isinstance(rtype, RTuple), rtype
length = get_expr_length(builder, expr)
if length is None:
# We cannot compute the length at compile time, so we will fetch it.
return builder.builder.builtin_len(expr_reg, line, use_pyssize_t=use_pyssize_t)
# The expression result is known at compile time, so we can use a constant.
return Integer(length, c_pyssize_t_rprimitive if use_pyssize_t else short_int_rprimitive)
| ForZip |
python | gabrielfalcao__HTTPretty | httpretty/errors.py | {
"start": 1322,
"end": 2168
} | class ____(HTTPrettyError):
def __init__(self, message='Failed to handle network request', request=None, address=None):
hint = 'Tip: You could try setting (allow_net_connect=True) to allow unregistered requests through a real TCP connection in addition to (verbose=True) to debug the issue.'
if request:
headers = json.dumps(dict(request.headers), indent=2)
message = '{message}.\n\nIntercepted unknown {request.method} request {request.url}\n\nWith headers {headers}'.format(**locals())
if isinstance(address, (tuple, list)):
address = ":".join(map(str, address))
if address:
hint = 'address: {address} | {hint}'.format(**locals())
self.request = request
super(UnmockedError, self).__init__('{message}\n\n{hint}'.format(**locals()))
| UnmockedError |
python | pypa__virtualenv | src/virtualenv/activation/bash/__init__.py | {
"start": 132,
"end": 648
} | class ____(ViaTemplateActivator):
def templates(self):
yield "activate.sh"
def as_name(self, template):
return Path(template).stem
def replacements(self, creator, dest):
data = super().replacements(creator, dest)
data.update({
"__TCL_LIBRARY__": getattr(creator.interpreter, "tcl_lib", None) or "",
"__TK_LIBRARY__": getattr(creator.interpreter, "tk_lib", None) or "",
})
return data
__all__ = [
"BashActivator",
]
| BashActivator |
python | fastai__fastai | fastai/callback/hook.py | {
"start": 4195,
"end": 9220
} | class ____(Callback):
"`Callback` that can be used to register hooks on `modules`"
_methods = ["hook"]
hook = noops
def __init__(self, modules=None, every=None, remove_end=True, is_forward=True, detach=True, cpu=True, include_paramless=False , **kwargs):
store_attr('modules,every,remove_end,is_forward,detach,cpu, include_paramless')
assert not kwargs
def before_fit(self):
"Register the `Hooks` on `self.modules`."
if self.modules is None: self.modules = [m for m in flatten_model(self.model) if self.include_paramless or has_params(m)]
if self.every is None: self._register()
def before_batch(self):
if self.every is None: return
if self.training and self.train_iter%self.every==0: self._register()
def after_batch(self):
if self.every is None: return
if self.training and self.train_iter%self.every==0: self._remove()
def after_fit(self):
"Remove the `Hooks`."
if self.remove_end: self._remove()
def _register(self): self.hooks = Hooks(self.modules, self.hook, self.is_forward, self.detach, self.cpu)
def _remove(self):
if getattr(self, 'hooks', None): self.hooks.remove()
def __del__(self): self._remove()
# %% ../../nbs/15_callback.hook.ipynb 59
def total_params(m):
"Give the number of parameters of a module and if it's trainable or not"
params = sum([p.numel() for p in m.parameters()])
trains = [p.requires_grad for p in m.parameters()]
return params, (False if len(trains)==0 else trains[0])
# %% ../../nbs/15_callback.hook.ipynb 61
def layer_info(learn, *xb):
"Return layer infos of `model` on `xb` (only support batch first inputs)"
def _track(m, i, o):
params, trainable, shape = '', '', ''
same = any((isinstance(x[0], torch.Tensor) and x[0].shape[1:] == x[1].shape for x in zip(i, o)))
shape = apply(lambda x: x.shape, o)
if hasattr(m, 'weight'): # non activation layer
params, trainable = total_params(m)
return (type(m).__name__, params, trainable, shape, same)
with Hooks(flatten_model(learn.model), _track) as h:
batch = apply(lambda o:o[:1], xb)
train_only_cbs = [cb for cb in learn.cbs if hasattr(cb, '_only_train_loop')]
with learn.removed_cbs(train_only_cbs), learn.no_logging(), learn as l:
r = l.get_preds(dl=[batch], inner=True, reorder=False)
return h.stored
# %% ../../nbs/15_callback.hook.ipynb 66
def _get_shapes(o, bs):
inp = o[first(o)] if (isinstance(o, dict)) else o
return ' x '.join([str(bs)] + [str(t) for t in inp[1:]])
def _print_shapes(o, bs):
if isinstance(o, torch.Size): return _get_shapes(o, bs)
elif isinstance(o, tuple): return _get_shapes(o[0], bs)
else: return str([_print_shapes(x, bs) for x in o])
# %% ../../nbs/15_callback.hook.ipynb 67
def module_summary(learn, *xb):
"Print a summary of `model` using `xb`"
#Individual parameters wrapped in ParameterModule aren't called through the hooks in `layer_info`,
# thus are not counted inside the summary
#TODO: find a way to have them counted in param number somehow
infos = layer_info(learn, *xb)
n,bs = 76,find_bs(xb)
inp_sz = _print_shapes(apply(lambda x:x.shape, xb), bs)
res = f"{type(learn.model).__name__} (Input shape: {inp_sz})\n"
res += "=" * n + "\n"
res += f"{'Layer (type)':<20} {'Output Shape':<20} {'Param #':<10} {'Trainable':<10}\n"
res += "=" * n
ps,trn_ps,j = 0,0,0
infos = [o for o in infos if o is not None] #see comment in previous cell
prev_sz = None
for typ,np,trn,sz,chnged in infos:
if sz is None: continue
if j == 0:
res += f'\n{"":<20} {_print_shapes(sz, bs)[:19]:<20}' # to avoid a double line at the top
if not chnged and not prev_sz == sz and j > 0: res += "\n" + "_" * n + "\n" + f'{"":<20} {_print_shapes(sz, bs)[:19]:<20}'
j = 1
res += f"\n{typ:<20} {'':<20} {np:<10} {str(trn):<10}"
if np != '':
ps += np
if trn: trn_ps += np
prev_sz = sz
res += "\n" + "_" * n + "\n"
res += f"\nTotal params: {ps:,}\n"
res += f"Total trainable params: {trn_ps:,}\n"
res += f"Total non-trainable params: {ps - trn_ps:,}\n\n"
return PrettyString(res)
# %% ../../nbs/15_callback.hook.ipynb 68
@patch
def summary(self:Learner):
"Print a summary of the model, optimizer and loss function."
xb = self.dls.train.one_batch()[:getattr(self.dls.train, "n_inp", 1)]
res = module_summary(self, *xb)
res += f"Optimizer used: {self.opt_func}\nLoss function: {self.loss_func}\n\n"
if self.opt is not None:
res += f"Model " + ("unfrozen\n\n" if self.opt.frozen_idx==0 else f"frozen up to parameter group #{self.opt.frozen_idx}\n\n")
res += "Callbacks:\n" + '\n'.join(f" - {cb}" for cb in self.cbs.sorted('order'))
return PrettyString(res)
# %% ../../nbs/15_callback.hook.ipynb 74
@delegates()
| HookCallback |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/operate/configuration/run_config/asset_example/resources.py | {
"start": 23,
"end": 208
} | class ____(dg.Config):
person_name: str
@dg.definitions
def resources() -> dg.Definitions:
return dg.Definitions(resources={"config": MyAssetConfig(person_name="")})
| MyAssetConfig |
python | pydata__xarray | xarray/coding/variables.py | {
"start": 23292,
"end": 24699
} | class ____(VariableCoder):
"""Encode NonString variables if dtypes differ."""
def encode(self, variable: Variable, name: T_Name = None) -> Variable:
if "dtype" in variable.encoding and variable.encoding["dtype"] not in (
"S1",
str,
):
dims, data, attrs, encoding = unpack_for_encoding(variable)
dtype = np.dtype(encoding.pop("dtype"))
if dtype != variable.dtype:
if np.issubdtype(dtype, np.integer):
if (
np.issubdtype(variable.dtype, np.floating)
and "_FillValue" not in variable.attrs
and "missing_value" not in variable.attrs
):
warnings.warn(
f"saving variable {name} with floating "
"point data as an integer dtype without "
"any _FillValue to use for NaNs",
SerializationWarning,
stacklevel=10,
)
data = duck_array_ops.round(data)
data = duck_array_ops.astype(data, dtype=dtype)
return Variable(dims, data, attrs, encoding, fastpath=True)
else:
return variable
def decode(self):
raise NotImplementedError()
| NonStringCoder |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_vitals.py | {
"start": 584,
"end": 4262
} | class ____(OrganizationEventsV2EndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
VITALS = {
"measurements.lcp": {"thresholds": [0, 2500, 4000]},
"measurements.fid": {"thresholds": [0, 100, 300]},
"measurements.cls": {"thresholds": [0, 0.1, 0.25]},
"measurements.fcp": {"thresholds": [0, 1000, 3000]},
"measurements.fp": {"thresholds": [0, 1000, 3000]},
}
def get(self, request: Request, organization: Organization) -> Response:
if not self.has_feature(organization, request):
return Response(status=404)
with sentry_sdk.start_span(op="discover.endpoint", name="parse params"):
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response([])
vitals = [vital.lower() for vital in request.GET.getlist("vital", [])]
if len(vitals) == 0:
raise ParseError(detail="Need to pass at least one vital")
performance_use_metrics = features.has(
"organizations:performance-use-metrics",
organization=organization,
actor=request.user,
)
dataset = self.get_dataset(request) if performance_use_metrics else discover
metrics_enhanced = dataset != discover
sentry_sdk.set_tag("performance.metrics_enhanced", metrics_enhanced)
allow_metric_aggregates = request.GET.get("preventMetricAggregates") != "1"
selected_columns = []
for vital in vitals:
if vital not in self.VITALS:
raise ParseError(detail=f"{vital} is not a valid vital")
selected_columns.extend(
[
f"p75({vital})",
f"count_web_vitals({vital}, good)",
f"count_web_vitals({vital}, meh)",
f"count_web_vitals({vital}, poor)",
f"count_web_vitals({vital}, any)",
]
)
with handle_query_errors():
events_results = dataset.query(
selected_columns=selected_columns,
query=request.GET.get("query"),
snuba_params=snuba_params,
# Results should only ever have 1 result
limit=1,
referrer="api.events.vitals",
auto_fields=True,
auto_aggregations=False,
use_aggregate_conditions=False,
allow_metric_aggregates=allow_metric_aggregates,
transform_alias_to_input_format=False,
)
results = {}
if len(events_results["data"]) == 1:
event_data = events_results["data"][0]
for vital in vitals:
results[vital] = {
"p75": event_data.get(get_function_alias(f"p75({vital})")),
"total": event_data.get(get_function_alias(f"count_web_vitals({vital}, any)"))
or 0,
"good": event_data.get(get_function_alias(f"count_web_vitals({vital}, good)"))
or 0,
"meh": event_data.get(get_function_alias(f"count_web_vitals({vital}, meh)"))
or 0,
"poor": event_data.get(get_function_alias(f"count_web_vitals({vital}, poor)"))
or 0,
}
results["meta"] = {"isMetricsData": events_results["meta"].get("isMetricsData", False)}
return Response(results)
| OrganizationEventsVitalsEndpoint |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/importwizard.py | {
"start": 15685,
"end": 17686
} | class ____(QWidget):
"""Import wizard preview widget"""
def __init__(self, parent):
QWidget.__init__(self, parent)
vert_layout = QVBoxLayout()
# Type frame
type_layout = QHBoxLayout()
type_label = QLabel(_("Import as"))
type_layout.addWidget(type_label)
self.array_btn = array_btn = QRadioButton(_("array"))
available_array = np.ndarray is not FakeObject
array_btn.setEnabled(available_array)
array_btn.setChecked(available_array)
type_layout.addWidget(array_btn)
list_btn = QRadioButton(_("list"))
list_btn.setChecked(not array_btn.isChecked())
type_layout.addWidget(list_btn)
if pd:
self.df_btn = df_btn = QRadioButton(_("DataFrame"))
df_btn.setChecked(False)
type_layout.addWidget(df_btn)
h_spacer = QSpacerItem(40, 20,
QSizePolicy.Expanding, QSizePolicy.Minimum)
type_layout.addItem(h_spacer)
type_frame = QFrame()
type_frame.setLayout(type_layout)
self._table_view = PreviewTable(self)
vert_layout.addWidget(type_frame)
vert_layout.addWidget(self._table_view)
self.setLayout(vert_layout)
def open_data(self, text, colsep=u"\t", rowsep=u"\n",
transpose=False, skiprows=0, comments='#'):
"""Open clipboard text as table"""
if pd:
self.pd_text = text
self.pd_info = dict(sep=colsep, lineterminator=rowsep,
skiprows=skiprows, comment=comments)
if colsep is None:
self.pd_info = dict(lineterminator=rowsep, skiprows=skiprows,
comment=comments, delim_whitespace=True)
self._table_view.process_data(text, colsep, rowsep, transpose,
skiprows, comments)
def get_data(self):
"""Return table data"""
return self._table_view.get_data()
| PreviewWidget |
python | pydata__xarray | xarray/namedarray/_typing.py | {
"start": 5057,
"end": 5833
} | class ____(_array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co]):
"""
Duck array supporting NEP 47.
Corresponds to np.ndarray.
"""
def __getitem__(
self,
key: (
_IndexKeyLike | Any
), # TODO: Any should be _arrayapi[Any, _dtype[np.integer]]
/,
) -> _arrayapi[Any, Any]: ...
def __array_namespace__(self) -> ModuleType: ...
# NamedArray can most likely use both __array_function__ and __array_namespace__:
_arrayfunction_or_api = (_arrayfunction, _arrayapi)
duckarray = Union[
_arrayfunction[_ShapeType_co, _DType_co], _arrayapi[_ShapeType_co, _DType_co]
]
# Corresponds to np.typing.NDArray:
DuckArray = _arrayfunction[Any, np.dtype[_ScalarType_co]]
@runtime_checkable
| _arrayapi |
python | explosion__spaCy | spacy/lang/th/__init__.py | {
"start": 468,
"end": 1090
} | class ____(DummyTokenizer):
def __init__(self, vocab: Vocab) -> None:
try:
from pythainlp.tokenize import word_tokenize
except ImportError:
raise ImportError(
"The Thai tokenizer requires the PyThaiNLP library: "
"https://github.com/PyThaiNLP/pythainlp"
) from None
self.word_tokenize = word_tokenize
self.vocab = vocab
def __call__(self, text: str) -> Doc:
words = list(self.word_tokenize(text))
spaces = [False] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
| ThaiTokenizer |
python | tensorflow__tensorflow | tensorflow/python/saved_model/model_utils/export_output.py | {
"start": 1077,
"end": 3249
} | class ____:
"""Represents an output of a model that can be served.
These typically correspond to model heads.
"""
__metaclass__ = abc.ABCMeta
_SEPARATOR_CHAR = '/'
@abc.abstractmethod
def as_signature_def(self, receiver_tensors):
"""Generate a SignatureDef proto for inclusion in a MetaGraphDef.
The SignatureDef will specify outputs as described in this ExportOutput,
and will use the provided receiver_tensors as inputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes that will be fed.
"""
pass
def _check_output_key(self, key, error_label):
# For multi-head models, the key can be a tuple.
if isinstance(key, tuple):
key = self._SEPARATOR_CHAR.join(key)
if not isinstance(key, str):
raise ValueError(
'{} output key must be a string; got {}.'.format(error_label, key))
return key
def _wrap_and_check_outputs(
self, outputs, single_output_default_name, error_label=None):
"""Wraps raw tensors as dicts and checks type.
Note that we create a new dict here so that we can overwrite the keys
if necessary.
Args:
outputs: A `Tensor` or a dict of string to `Tensor`.
single_output_default_name: A string key for use in the output dict
if the provided `outputs` is a raw tensor.
error_label: descriptive string for use in error messages. If none,
single_output_default_name will be used.
Returns:
A dict of tensors
Raises:
ValueError: if the outputs dict keys are not strings or tuples of strings
or the values are not Tensors.
"""
if not isinstance(outputs, dict):
outputs = {single_output_default_name: outputs}
output_dict = {}
for key, value in outputs.items():
error_name = error_label or single_output_default_name
key = self._check_output_key(key, error_name)
if not isinstance(value, tensor.Tensor):
raise ValueError(
'{} output value must be a Tensor; got {}.'.format(
error_name, value))
output_dict[key] = value
return output_dict
| ExportOutput |
python | readthedocs__readthedocs.org | readthedocs/invitations/models.py | {
"start": 2627,
"end": 7257
} | class ____(TimeStampedModel):
"""
Invitation model.
An invitation can be attached to an existing user or to an email.
"""
# Generic foreign key.
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
object = GenericForeignKey("content_type", "object_id")
# Normal fields.
from_user = models.ForeignKey(
User,
verbose_name=_("From user"),
on_delete=models.CASCADE,
related_name="invitations_sent",
)
to_user = models.ForeignKey(
User,
verbose_name=_("To user"),
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="invitations_received",
)
to_email = models.EmailField(_("E-mail"), null=True, blank=True)
token = models.CharField(
unique=True,
max_length=32,
)
expiration_date = models.DateTimeField(_("Expiration date"))
objects = InvitationQueryset.as_manager()
class Meta:
indexes = [
models.Index(fields=["content_type", "object_id"]),
]
unique_together = [
("to_user", "content_type", "object_id"),
("to_email", "content_type", "object_id"),
]
@property
def username(self):
if self.to_user:
return self.to_user.username
return self.to_email
@property
def expired(self):
return timezone.now() > self.expiration_date
@cached_property
def backend(self):
return get_backend(self)
def save(self, *args, **kwargs):
if not self.token:
self.token = self.generate_token()
if not self.expiration_date:
self.expiration_date = timezone.now() + timezone.timedelta(
days=settings.RTD_INVITATIONS_EXPIRATION_DAYS
)
super().save(*args, **kwargs)
@staticmethod
def generate_token():
return get_random_string(32)
def redeem(self, user=None, request=None):
"""
Redeem invitation.
`user` will be used only if the invitation is attached
to an email, otherwise `to_user` user will be used.
:param request: If given, a log entry will be created.
"""
if self.expired:
return False
if self.to_user:
user = self.to_user
log.info(
"Redeeming invitation",
invitation_pk=self.pk,
for_user=user.username,
object_type=self.object_type,
object_name=self.object_name,
object_pk=self.object.pk,
)
if request:
self.create_audit_log(
action=AuditLog.INVITATION_ACCEPTED,
request=request,
user=user,
)
return self.backend.redeem(user=user)
def get_success_url(self):
"""URL to redirect after the invitation has been redeemed."""
return self.backend.get_success_url()
def get_origin_url(self):
"""URL from where the invitations for the object are created."""
return self.backend.get_origin_url()
def get_absolute_url(self):
return reverse("invitations_redeem", args=[self.token])
def can_revoke_invitation(self, user):
"""
Check whether the user can revoke the invitation.
A user can revoke an invitation if it's the owner
of the object attached to it.
"""
return self.backend.owns_object(user)
@property
def object_type(self):
return self.content_type.model
@property
def object_name(self):
return self.backend.get_object_name()
@property
def object_url(self):
return self.backend.get_object_url()
def send(self):
self.backend.send_invitation()
def create_audit_log(self, action, request, user=None):
"""Create an audit log entry for this invitation."""
from readthedocs.audit.serializers import InvitationSerializer
# Attach the proper project and organization to the log.
kwargs = {}
object_type = self.object_type
if object_type == "organization":
kwargs["organization"] = self.object
elif object_type == "project":
kwargs["project"] = self.object
elif object_type == "team":
kwargs["organization"] = self.object.organization
AuditLog.objects.new(
action=action,
request=request,
data=InvitationSerializer(self).data,
user=user,
**kwargs,
)
| Invitation |
python | coleifer__peewee | tests/models.py | {
"start": 86076,
"end": 91689
} | class ____(ModelTestCase):
requires = [User, Tweet]
def setUp(self):
super(TestForUpdateIntegration, self).setUp()
self.alt_db = new_connection()
class AltUser(User):
class Meta:
database = self.alt_db
table_name = User._meta.table_name
class AltTweet(Tweet):
class Meta:
database = self.alt_db
table_name = Tweet._meta.table_name
self.AltUser = AltUser
self.AltTweet = AltTweet
def tearDown(self):
self.alt_db.close()
super(TestForUpdateIntegration, self).tearDown()
@skip_if(IS_CRDB, 'crdb locks-up on this test, blocking reads')
def test_for_update(self):
with self.database.atomic():
User.create(username='huey')
zaizee = User.create(username='zaizee')
AltUser = self.AltUser
with self.database.manual_commit():
self.database.begin()
users = (User.select().where(User.username == 'zaizee')
.for_update()
.execute())
updated = (User
.update(username='ziggy')
.where(User.username == 'zaizee')
.execute())
self.assertEqual(updated, 1)
if IS_POSTGRESQL:
nrows = (AltUser
.update(username='huey-x')
.where(AltUser.username == 'huey')
.execute())
self.assertEqual(nrows, 1)
query = (AltUser
.select(AltUser.username)
.where(AltUser.id == zaizee.id))
self.assertEqual(query.get().username, 'zaizee')
self.database.commit()
self.assertEqual(query.get().username, 'ziggy')
def test_for_update_blocking(self):
User.create(username='u1')
AltUser = self.AltUser
evt = threading.Event()
def run_in_thread():
with self.alt_db.atomic():
evt.wait()
n = (AltUser.update(username='u1-y')
.where(AltUser.username == 'u1')
.execute())
self.assertEqual(n, 0)
t = threading.Thread(target=run_in_thread)
t.daemon = True
t.start()
with self.database.atomic() as txn:
q = (User.select()
.where(User.username == 'u1')
.for_update()
.execute())
evt.set()
n = (User.update(username='u1-x')
.where(User.username == 'u1')
.execute())
self.assertEqual(n, 1)
t.join(timeout=5)
u = User.get()
self.assertEqual(u.username, 'u1-x')
def test_for_update_nested(self):
User.insert_many([(u,) for u in 'abc']).execute()
subq = User.select().where(User.username != 'b').for_update()
nrows = (User
.delete()
.where(User.id.in_(subq))
.execute())
self.assertEqual(nrows, 2)
def test_for_update_nowait(self):
User.create(username='huey')
zaizee = User.create(username='zaizee')
AltUser = self.AltUser
with self.database.manual_commit():
self.database.begin()
users = (User
.select(User.username)
.where(User.username == 'zaizee')
.for_update(nowait=True)
.execute())
def will_fail():
return (AltUser
.select()
.where(AltUser.username == 'zaizee')
.for_update(nowait=True)
.get())
self.assertRaises((OperationalError, InternalError), will_fail)
self.database.commit()
@requires_postgresql
@requires_models(User, Tweet)
def test_for_update_of(self):
h = User.create(username='huey')
z = User.create(username='zaizee')
Tweet.create(user=h, content='h')
Tweet.create(user=z, content='z')
AltUser, AltTweet = self.AltUser, self.AltTweet
with self.database.manual_commit():
self.database.begin()
# Lock tweets by huey.
query = (Tweet
.select()
.join(User)
.where(User.username == 'huey')
.for_update(of=Tweet, nowait=True))
qr = query.execute()
# No problem updating zaizee's tweet or huey's user.
nrows = (AltTweet
.update(content='zx')
.where(AltTweet.user == z.id)
.execute())
self.assertEqual(nrows, 1)
nrows = (AltUser
.update(username='huey-x')
.where(AltUser.username == 'huey')
.execute())
self.assertEqual(nrows, 1)
def will_fail():
(AltTweet
.select()
.where(AltTweet.user == h)
.for_update(nowait=True)
.get())
self.assertRaises((OperationalError, InternalError), will_fail)
self.database.commit()
query = Tweet.select(Tweet, User).join(User).order_by(Tweet.id)
self.assertEqual([(t.content, t.user.username) for t in query],
[('h', 'huey-x'), ('zx', 'zaizee')])
| TestForUpdateIntegration |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_reflection.py | {
"start": 2200,
"end": 12118
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "oracle"
__sparse_driver_backend__ = True
@classmethod
def setup_test_class(cls):
# currently assuming full DBA privs for the user.
# don't really know how else to go here unless
# we connect as the other user.
with testing.db.begin() as conn:
for stmt in (
"""
create table %(test_schema)s.parent(
id integer primary key,
data varchar2(50)
);
COMMENT ON TABLE %(test_schema)s.parent IS 'my table comment';
create table %(test_schema)s.child(
id integer primary key,
data varchar2(50),
parent_id integer references %(test_schema)s.parent(id)
);
create table local_table(
id integer primary key,
data varchar2(50)
);
create synonym %(test_schema)s.ptable for %(test_schema)s.parent;
create synonym %(test_schema)s.ctable for %(test_schema)s.child;
create synonym %(test_schema)s_pt for %(test_schema)s.parent;
create synonym %(test_schema)s.local_table for local_table;
-- can't make a ref from local schema to the
-- remote schema's table without this,
-- *and* can't give yourself a grant !
-- so we give it to public. ideas welcome.
grant references on %(test_schema)s.parent to public;
grant references on %(test_schema)s.child to public;
"""
% {"test_schema": testing.config.test_schema}
).split(";"):
if stmt.strip():
conn.exec_driver_sql(stmt)
@classmethod
def teardown_test_class(cls):
with testing.db.begin() as conn:
for stmt in (
"""
drop table %(test_schema)s.child;
drop table %(test_schema)s.parent;
drop table local_table;
drop synonym %(test_schema)s.ctable;
drop synonym %(test_schema)s.ptable;
drop synonym %(test_schema)s_pt;
drop synonym %(test_schema)s.local_table;
"""
% {"test_schema": testing.config.test_schema}
).split(";"):
if stmt.strip():
conn.exec_driver_sql(stmt)
def test_create_same_names_explicit_schema(self, metadata, connection):
schema = testing.db.dialect.default_schema_name
meta = metadata
parent = Table(
"parent",
meta,
Column("pid", Integer, primary_key=True),
schema=schema,
)
child = Table(
"child",
meta,
Column("cid", Integer, primary_key=True),
Column("pid", Integer, ForeignKey("%s.parent.pid" % schema)),
schema=schema,
)
meta.create_all(connection)
connection.execute(parent.insert(), {"pid": 1})
connection.execute(child.insert(), {"cid": 1, "pid": 1})
eq_(connection.execute(child.select()).fetchall(), [(1, 1)])
def test_reflect_alt_table_owner_local_synonym(self):
meta = MetaData()
parent = Table(
"%s_pt" % testing.config.test_schema,
meta,
autoload_with=testing.db,
oracle_resolve_synonyms=True,
)
self.assert_compile(
parent.select(),
"SELECT %(test_schema)s_pt.id, "
"%(test_schema)s_pt.data FROM %(test_schema)s_pt"
% {"test_schema": testing.config.test_schema},
)
def test_reflect_alt_synonym_owner_local_table(self):
meta = MetaData()
parent = Table(
"local_table",
meta,
autoload_with=testing.db,
oracle_resolve_synonyms=True,
schema=testing.config.test_schema,
)
self.assert_compile(
parent.select(),
"SELECT %(test_schema)s.local_table.id, "
"%(test_schema)s.local_table.data "
"FROM %(test_schema)s.local_table"
% {"test_schema": testing.config.test_schema},
)
def test_create_same_names_implicit_schema(self, metadata, connection):
meta = metadata
parent = Table(
"parent", meta, Column("pid", Integer, primary_key=True)
)
child = Table(
"child",
meta,
Column("cid", Integer, primary_key=True),
Column("pid", Integer, ForeignKey("parent.pid")),
)
meta.create_all(connection)
connection.execute(parent.insert(), {"pid": 1})
connection.execute(child.insert(), {"cid": 1, "pid": 1})
eq_(connection.execute(child.select()).fetchall(), [(1, 1)])
def test_reflect_alt_owner_explicit(self):
meta = MetaData()
parent = Table(
"parent",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
child = Table(
"child",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
self.assert_compile(
parent.join(child),
"%(test_schema)s.parent JOIN %(test_schema)s.child ON "
"%(test_schema)s.parent.id = %(test_schema)s.child.parent_id"
% {"test_schema": testing.config.test_schema},
)
with testing.db.connect() as conn:
conn.execute(
select(parent, child).select_from(parent.join(child))
).fetchall()
# check table comment (#5146)
eq_(parent.comment, "my table comment")
def test_reflect_table_comment(self, metadata, connection):
local_parent = Table(
"parent",
metadata,
Column("q", Integer),
comment="my local comment",
)
local_parent.create(connection)
insp = inspect(connection)
eq_(
insp.get_table_comment(
"parent", schema=testing.config.test_schema
),
{"text": "my table comment"},
)
eq_(
insp.get_table_comment(
"parent",
),
{"text": "my local comment"},
)
eq_(
insp.get_table_comment(
"parent", schema=connection.dialect.default_schema_name
),
{"text": "my local comment"},
)
def test_reflect_local_to_remote(self, connection):
connection.exec_driver_sql(
"CREATE TABLE localtable (id INTEGER "
"PRIMARY KEY, parent_id INTEGER REFERENCES "
"%(test_schema)s.parent(id))"
% {"test_schema": testing.config.test_schema},
)
try:
meta = MetaData()
lcl = Table("localtable", meta, autoload_with=testing.db)
parent = meta.tables["%s.parent" % testing.config.test_schema]
self.assert_compile(
parent.join(lcl),
"%(test_schema)s.parent JOIN localtable ON "
"%(test_schema)s.parent.id = "
"localtable.parent_id"
% {"test_schema": testing.config.test_schema},
)
finally:
connection.exec_driver_sql("DROP TABLE localtable")
def test_reflect_alt_owner_implicit(self):
meta = MetaData()
parent = Table(
"parent",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
child = Table(
"child",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
self.assert_compile(
parent.join(child),
"%(test_schema)s.parent JOIN %(test_schema)s.child "
"ON %(test_schema)s.parent.id = "
"%(test_schema)s.child.parent_id"
% {"test_schema": testing.config.test_schema},
)
with testing.db.connect() as conn:
conn.execute(
select(parent, child).select_from(parent.join(child))
).fetchall()
def test_reflect_alt_owner_synonyms(self, connection):
connection.exec_driver_sql(
"CREATE TABLE localtable (id INTEGER "
"PRIMARY KEY, parent_id INTEGER REFERENCES "
"%s.ptable(id))" % testing.config.test_schema,
)
try:
meta = MetaData()
lcl = Table(
"localtable",
meta,
autoload_with=connection,
oracle_resolve_synonyms=True,
)
parent = meta.tables["%s.ptable" % testing.config.test_schema]
self.assert_compile(
parent.join(lcl),
"%(test_schema)s.ptable JOIN localtable ON "
"%(test_schema)s.ptable.id = "
"localtable.parent_id"
% {"test_schema": testing.config.test_schema},
)
connection.execute(
select(parent, lcl).select_from(parent.join(lcl))
).fetchall()
finally:
connection.exec_driver_sql("DROP TABLE localtable")
def test_reflect_remote_synonyms(self):
meta = MetaData()
parent = Table(
"ptable",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
oracle_resolve_synonyms=True,
)
child = Table(
"ctable",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
oracle_resolve_synonyms=True,
)
self.assert_compile(
parent.join(child),
"%(test_schema)s.ptable JOIN "
"%(test_schema)s.ctable "
"ON %(test_schema)s.ptable.id = "
"%(test_schema)s.ctable.parent_id"
% {"test_schema": testing.config.test_schema},
)
| MultiSchemaTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.