language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-assemblyai/llama_index/readers/assemblyai/base.py | {
"start": 212,
"end": 801
} | class ____(Enum):
"""Transcript format to use for the document reader."""
TEXT = "text"
"""One document with the transcription text"""
SENTENCES = "sentences"
"""Multiple documents, splits the transcription by each sentence"""
PARAGRAPHS = "paragraphs"
"""Multiple documents, splits the transcription by each paragraph"""
SUBTITLES_SRT = "subtitles_srt"
"""One document with the transcript exported in SRT subtitles format"""
SUBTITLES_VTT = "subtitles_vtt"
"""One document with the transcript exported in VTT subtitles format"""
| TranscriptFormat |
python | davidhalter__jedi | test/completion/pep0484_generic_parameters.py | {
"start": 5297,
"end": 7084
} | class ____(Generic[T_co]):
def __init__(self, val: T_co) -> None:
self.val = val
# Test extraction of type from a custom generic type
def custom(x: CustomGeneric[T]) -> T:
return x.val
custom_instance: CustomGeneric[int] = CustomGeneric(42)
#? int()
custom(custom_instance)
x3 = custom(custom_instance)
#? int()
x3
# Test construction of a custom generic type
def wrap_custom(iterable: Iterable[T]) -> List[CustomGeneric[T]]:
return [CustomGeneric(x) for x in iterable]
#? int()
wrap_custom(list_ints)[0].val
x4 = wrap_custom(list_ints)[0]
#? int()
x4.val
for x5 in wrap_custom(list_ints):
#? int()
x5.val
# Test extraction of type from a nested custom generic type
list_custom_instances: List[CustomGeneric[int]] = [CustomGeneric(42)]
def unwrap_custom(iterable: Iterable[CustomGeneric[T]]) -> List[T]:
return [x.val for x in iterable]
#? int()
unwrap_custom(list_custom_instances)[0]
x6 = unwrap_custom(list_custom_instances)[0]
#? int()
x6
for x7 in unwrap_custom(list_custom_instances):
#? int()
x7
for xc in unwrap_custom([CustomGeneric(s) for s in 'abc']):
#? str()
xc
for xg in unwrap_custom(CustomGeneric(s) for s in 'abc'):
#? str()
xg
# Test extraction of type from type parameer nested within a custom generic type
custom_instance_list_int: CustomGeneric[List[int]] = CustomGeneric([42])
def unwrap_custom2(instance: CustomGeneric[Iterable[T]]) -> List[T]:
return list(instance.val)
#? int()
unwrap_custom2(custom_instance_list_int)[0]
x8 = unwrap_custom2(custom_instance_list_int)[0]
#? int()
x8
for x9 in unwrap_custom2(custom_instance_list_int):
#? int()
x9
# Test that classes which have generic parents but are not generic themselves
# are still inferred correctly.
| CustomGeneric |
python | walkccc__LeetCode | solutions/2711. Difference of Number of Distinct Values on Diagonals/2711.py | {
"start": 0,
"end": 1016
} | class ____:
def differenceOfDistinctValues(self, grid: list[list[int]]) -> list[list[int]]:
m = len(grid)
n = len(grid[0])
ans = [[0] * n for _ in range(m)]
def fillInDiagonal(i: int, j: int) -> None:
topLeft = set()
bottomRight = set()
# Fill in the diagonal from the top-left to the bottom-right.
while i < len(grid) and j < len(grid[0]):
ans[i][j] = len(topLeft)
# Post-addition, so this information can be utilized in subsequent cells.
topLeft.add(grid[i][j])
i += 1
j += 1
i -= 1
j -= 1
# Fill in the diagonal from the bottom-right to the top-left.
while i >= 0 and j >= 0:
ans[i][j] = abs(ans[i][j] - len(bottomRight))
# Post-addition, so this information can be utilized in subsequent cells.
bottomRight.add(grid[i][j])
i -= 1
j -= 1
for i in range(m):
fillInDiagonal(i, 0)
for j in range(1, n):
fillInDiagonal(0, j)
return ans
| Solution |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/sqlparser.py | {
"start": 2122,
"end": 2436
} | class ____(TypedDict):
"""get_table_schemas params."""
normalize_name: Callable[[str], str]
is_cross_db: bool
information_schema_columns: list[str]
information_schema_table: str
use_flat_cross_db_query: bool
is_uppercase_names: bool
database: str | None
@define
| GetTableSchemasParams |
python | faif__python-patterns | patterns/creational/builder.py | {
"start": 1622,
"end": 1770
} | class ____(Building):
def build_floor(self) -> None:
self.floor = "One"
def build_size(self) -> None:
self.size = "Big"
| House |
python | PyCQA__pylint | tests/functional/i/iterable_context_py36.py | {
"start": 132,
"end": 1150
} | class ____:
async def __aiter__(self):
for value in range(20):
yield value
async def to_list(self):
values = [m async for m in self]
other_values = [m for m in self] # [not-an-iterable]
for value in self: # [not-an-iterable]
yield value
async for value in self:
yield value
async def some_iter_func(number):
""" emits 1 number per second """
for i in range(1, number):
yield i
await asyncio.sleep(1)
async def count_to(number):
""" counts to n in async manner"""
async for i in some_iter_func(number):
print(i)
async def gen_values(num_values):
for value in range(num_values):
yield value
async def do_some_comprehensions():
sets = {elem async for elem in gen_values(10)}
lists = [elem async for elem in gen_values(10)]
dicts = {elem: elem async for elem in gen_values(10)}
gen = (elem async for elem in gen_values(10))
return sets, lists, dicts, gen
| AIter |
python | pypa__pip | tests/functional/test_config_settings.py | {
"start": 1684,
"end": 9947
} | class ____:
def build_wheel(
self,
wheel_directory,
config_settings=None,
metadata_directory=None
):
if config_settings is None:
config_settings = {}
w = os.path.join(wheel_directory, "{{name}}-1.0-py3-none-any.whl")
with open(w, "wb") as f:
with ZipFile(f, "w") as z:
make_wheel(
z, "{{name}}", "1.0", "{{requires_dist}}",
[("{{name}}-config.json", json.dumps(config_settings))]
)
return "{{name}}-1.0-py3-none-any.whl"
build_editable = build_wheel
main = Backend()
'''
def make_project(
path: Path, name: str = "foo", dependencies: list[str] | None = None
) -> tuple[str, str, Path]:
version = "1.0"
project_dir = path / name
backend = project_dir / "backend"
backend.mkdir(parents=True)
(project_dir / "pyproject.toml").write_text(PYPROJECT_TOML)
requires_dist = [f"Requires-Dist: {dep}" for dep in dependencies or []]
(backend / "dummy_backend.py").write_text(
BACKEND_SRC.replace("{{name}}", name).replace(
"{{requires_dist}}", "\n".join(requires_dist)
)
)
return name, version, project_dir
def test_backend_sees_config(script: PipTestEnvironment) -> None:
name, version, project_dir = make_project(script.scratch_path)
script.pip(
"wheel",
"--config-settings",
"FOO=Hello",
project_dir,
)
wheel_file_name = f"{name}-{version}-py3-none-any.whl"
wheel_file_path = script.cwd / wheel_file_name
with open(wheel_file_path, "rb") as f:
with ZipFile(f) as z:
output = z.read(f"{name}-config.json")
assert json.loads(output) == {"FOO": "Hello"}
def test_backend_sees_config_reqs(script: PipTestEnvironment) -> None:
name, version, project_dir = make_project(script.scratch_path)
script.scratch_path.joinpath("reqs.txt").write_text(
f"{project_dir} --config-settings FOO=Hello"
)
script.pip("wheel", "-r", "reqs.txt")
wheel_file_name = f"{name}-{version}-py3-none-any.whl"
wheel_file_path = script.cwd / wheel_file_name
with open(wheel_file_path, "rb") as f:
with ZipFile(f) as z:
output = z.read(f"{name}-config.json")
assert json.loads(output) == {"FOO": "Hello"}
def test_backend_sees_config_via_constraint(script: PipTestEnvironment) -> None:
name, version, project_dir = make_project(script.scratch_path)
constraints_file = script.scratch_path / "constraints.txt"
constraints_file.write_text(f"{name} @ {path_to_url(str(project_dir))}")
script.pip(
"wheel",
"--config-settings",
"FOO=Hello",
"-c",
"constraints.txt",
name,
)
wheel_file_name = f"{name}-{version}-py3-none-any.whl"
wheel_file_path = script.cwd / wheel_file_name
with open(wheel_file_path, "rb") as f:
with ZipFile(f) as z:
output = z.read(f"{name}-config.json")
assert json.loads(output) == {"FOO": "Hello"}
@pytest.mark.network
def test_backend_sees_config_via_sdist(script: PipTestEnvironment) -> None:
name, version, project_dir = make_project(script.scratch_path)
dists_dir = script.scratch_path / "dists"
dists_dir.mkdir()
with tarfile.open(dists_dir / f"{name}-{version}.tar.gz", "w:gz") as dist_tar:
dist_tar.add(project_dir, arcname=name)
script.pip(
"wheel",
"--config-settings",
"FOO=Hello",
"-f",
dists_dir,
name,
)
wheel_file_name = f"{name}-{version}-py3-none-any.whl"
wheel_file_path = script.cwd / wheel_file_name
with open(wheel_file_path, "rb") as f:
with ZipFile(f) as z:
output = z.read(f"{name}-config.json")
assert json.loads(output) == {"FOO": "Hello"}
def test_req_file_does_not_see_config(script: PipTestEnvironment) -> None:
"""Test that CLI config settings do not propagate to requirement files."""
name, _, project_dir = make_project(script.scratch_path)
reqs_file = script.scratch_path / "reqs.txt"
reqs_file.write_text(f"{project_dir}")
script.pip(
"install",
"--config-settings",
"FOO=Hello",
"-r",
reqs_file,
)
config = script.site_packages_path / f"{name}-config.json"
with open(config, "rb") as f:
assert json.load(f) == {}
def test_dep_does_not_see_config(script: PipTestEnvironment) -> None:
"""Test that CLI config settings do not propagate to dependencies."""
_, _, bar_project_dir = make_project(script.scratch_path, name="bar")
_, _, foo_project_dir = make_project(
script.scratch_path,
name="foo",
dependencies=[f"bar @ {path_to_url(str(bar_project_dir))}"],
)
script.pip(
"install",
"--config-settings",
"FOO=Hello",
foo_project_dir,
)
foo_config = script.site_packages_path / "foo-config.json"
with open(foo_config, "rb") as f:
assert json.load(f) == {"FOO": "Hello"}
bar_config = script.site_packages_path / "bar-config.json"
with open(bar_config, "rb") as f:
assert json.load(f) == {}
def test_dep_in_req_file_does_not_see_config(script: PipTestEnvironment) -> None:
"""Test that CLI config settings do not propagate to dependencies found in
requirement files."""
_, _, bar_project_dir = make_project(script.scratch_path, name="bar")
_, _, foo_project_dir = make_project(
script.scratch_path,
name="foo",
dependencies=["bar"],
)
reqs_file = script.scratch_path / "reqs.txt"
reqs_file.write_text(f"bar @ {path_to_url(str(bar_project_dir))}")
script.pip(
"install",
"--config-settings",
"FOO=Hello",
"-r",
reqs_file,
foo_project_dir,
)
foo_config = script.site_packages_path / "foo-config.json"
with open(foo_config, "rb") as f:
assert json.load(f) == {"FOO": "Hello"}
bar_config = script.site_packages_path / "bar-config.json"
with open(bar_config, "rb") as f:
assert json.load(f) == {}
def test_install_sees_config(script: PipTestEnvironment) -> None:
name, _, project_dir = make_project(script.scratch_path)
script.pip(
"install",
"--config-settings",
"FOO=Hello",
project_dir,
)
config = script.site_packages_path / f"{name}-config.json"
with open(config, "rb") as f:
assert json.load(f) == {"FOO": "Hello"}
def test_install_sees_config_reqs(script: PipTestEnvironment) -> None:
name, _, project_dir = make_project(script.scratch_path)
script.scratch_path.joinpath("reqs.txt").write_text(
f"{project_dir} --config-settings FOO=Hello"
)
script.pip("install", "-r", "reqs.txt")
config = script.site_packages_path / f"{name}-config.json"
with open(config, "rb") as f:
assert json.load(f) == {"FOO": "Hello"}
def test_install_editable_sees_config(script: PipTestEnvironment) -> None:
name, _, project_dir = make_project(script.scratch_path)
script.pip(
"install",
"--config-settings",
"FOO=Hello",
"--editable",
project_dir,
)
config = script.site_packages_path / f"{name}-config.json"
with open(config, "rb") as f:
assert json.load(f) == {"FOO": "Hello"}
def test_install_config_reqs(script: PipTestEnvironment) -> None:
name, _, project_dir = make_project(script.scratch_path)
a_sdist = create_basic_sdist_for_package(
script,
"foo",
"1.0",
{"pyproject.toml": PYPROJECT_TOML, "backend/dummy_backend.py": BACKEND_SRC},
)
script.scratch_path.joinpath("reqs.txt").write_text(
f'{project_dir} --config-settings "--build-option=--cffi" '
'--config-settings "--build-option=--avx2" '
"--config-settings FOO=BAR"
)
script.pip("install", "--no-index", "-f", str(a_sdist.parent), "-r", "reqs.txt")
script.assert_installed(foo="1.0")
config = script.site_packages_path / f"{name}-config.json"
with open(config, "rb") as f:
assert json.load(f) == {"--build-option": ["--cffi", "--avx2"], "FOO": "BAR"}
| Backend |
python | FactoryBoy__factory_boy | tests/test_declarations.py | {
"start": 1787,
"end": 2996
} | class ____(unittest.TestCase):
def test_standard(self):
a = declarations.SelfAttribute('foo.bar.baz')
self.assertEqual(0, a.depth)
self.assertEqual('foo.bar.baz', a.attribute_name)
self.assertEqual(declarations._UNSPECIFIED, a.default)
def test_dot(self):
a = declarations.SelfAttribute('.bar.baz')
self.assertEqual(1, a.depth)
self.assertEqual('bar.baz', a.attribute_name)
self.assertEqual(declarations._UNSPECIFIED, a.default)
def test_default(self):
a = declarations.SelfAttribute('bar.baz', 42)
self.assertEqual(0, a.depth)
self.assertEqual('bar.baz', a.attribute_name)
self.assertEqual(42, a.default)
def test_parent(self):
a = declarations.SelfAttribute('..bar.baz')
self.assertEqual(2, a.depth)
self.assertEqual('bar.baz', a.attribute_name)
self.assertEqual(declarations._UNSPECIFIED, a.default)
def test_grandparent(self):
a = declarations.SelfAttribute('...bar.baz')
self.assertEqual(3, a.depth)
self.assertEqual('bar.baz', a.attribute_name)
self.assertEqual(declarations._UNSPECIFIED, a.default)
| SelfAttributeTestCase |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 45559,
"end": 46855
} | class ____(test_util.TensorFlowTestCase):
# Basic NextAfter tests that replicate numpy nextafter tests.
def testBasic(self):
for dtype in [dtypes.float32, dtypes.float64]:
one = constant_op.constant([1], dtype=dtype)
two = constant_op.constant([2], dtype=dtype)
zero = constant_op.constant([0], dtype=dtype)
nan = constant_op.constant([np.nan], dtype=dtype)
eps = constant_op.constant([np.finfo(dtype.as_numpy_dtype).eps],
dtype=dtype)
self.assertAllEqual(math_ops.nextafter(one, two) - one, eps)
self.assertAllLess(math_ops.nextafter(one, zero) - one, 0)
self.assertAllEqual(math_ops.is_nan(math_ops.nextafter(nan, one)), [True])
self.assertAllEqual(math_ops.is_nan(math_ops.nextafter(one, nan)), [True])
self.assertAllEqual(math_ops.nextafter(one, one), one)
def testBroadcasting(self):
for dtype in [dtypes.float32, dtypes.float64]:
one = constant_op.constant([1, 1], dtype=dtype)
two = constant_op.constant([2], dtype=dtype)
eps = np.finfo(dtype.as_numpy_dtype).eps
eps_const = constant_op.constant([eps, eps], dtype=dtype)
self.assertAllEqual(math_ops.nextafter(one, two) - one, eps_const)
@test_util.run_all_in_graph_and_eager_modes
| NextAfterTest |
python | bokeh__bokeh | src/bokeh/models/glyph.py | {
"start": 2712,
"end": 3013
} | class ____(XYGlyph):
''' Base class of glyphs with `x` and `y` coordinate attributes and
a radius specification.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
| RadialGlyph |
python | lxml__lxml | src/lxml/tests/test_css.py | {
"start": 236,
"end": 1808
} | class ____(HelperTestCase):
pytestmark = skipif('cssselect is None')
def test_cssselect(self):
div, = lxml.html.fromstring(HTML).xpath('//div')
def count(selector, expected_count, **kwargs):
result = div.cssselect(selector, **kwargs)
self.assertEqual(len(result), expected_count)
count('div', 1)
count('a', 2)
count('em', 0)
# Element names are case-insensitive in HTML
count('DIV', 1)
# ... but not in XHTML or XML
count('DIV', 0, translator='xhtml')
count('DIV', 0, translator='xml')
# :contains() is case-insensitive in lxml
count(':contains("link")', 2) # div, a
count(':contains("LInk")', 2)
# Whatever the document language
count(':contains("LInk")', 2, translator='xhtml')
count(':contains("LInk")', 2, translator='xml')
# ... but not in upstream cssselect
import cssselect
count(':contains("link")', 2, translator=cssselect.HTMLTranslator())
count(':contains("LInk")', 0, translator=cssselect.HTMLTranslator())
def test_suite():
suite = unittest.TestSuite()
try:
import cssselect
except ImportError:
# no 'cssselect' installed
print("Skipping tests in lxml.cssselect - external cssselect package is not installed")
return suite
import lxml.cssselect
suite.addTests(doctest.DocTestSuite(lxml.cssselect))
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(CSSTestCase)])
return suite
| CSSTestCase |
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 18078,
"end": 20003
} | class ____(nn.Module):
"""
Implementation of Question-Aware Span Selection (QASS) head, described in Splinter's paper:
"""
def __init__(self, config):
super().__init__()
self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
def forward(self, inputs, positions):
_, _, dim = inputs.size()
index = positions.unsqueeze(-1).repeat(1, 1, dim) # [batch_size, num_positions, dim]
gathered_reps = torch.gather(inputs, dim=1, index=index) # [batch_size, num_positions, dim]
query_start_reps = self.query_start_transform(gathered_reps) # [batch_size, num_positions, dim]
query_end_reps = self.query_end_transform(gathered_reps) # [batch_size, num_positions, dim]
start_reps = self.start_transform(inputs) # [batch_size, seq_length, dim]
end_reps = self.end_transform(inputs) # [batch_size, seq_length, dim]
hidden_states = self.start_classifier(query_start_reps) # [batch_size, num_positions, dim]
start_reps = start_reps.permute(0, 2, 1) # [batch_size, dim, seq_length]
start_logits = torch.matmul(hidden_states, start_reps)
hidden_states = self.end_classifier(query_end_reps)
end_reps = end_reps.permute(0, 2, 1)
end_logits = torch.matmul(hidden_states, end_reps)
return start_logits, end_logits
@auto_docstring
| QuestionAwareSpanSelectionHead |
python | bottlepy__bottle | test/test_stpl.py | {
"start": 225,
"end": 10014
} | class ____(unittest.TestCase):
def assertRenders(self, tpl, to, *args, **vars):
if isinstance(tpl, str):
tpl = SimpleTemplate(tpl, lookup=[os.path.join(os.path.dirname(__file__), 'views')])
self.assertEqual(touni(to), tpl.render(*args, **vars))
def test_string(self):
""" Templates: Parse string"""
self.assertRenders('start {{var}} end', 'start var end', var='var')
def test_self_as_variable_name(self):
self.assertRenders('start {{self}} end', 'start var end', {'self':'var'})
def test_file(self):
with chdir(__file__):
t = SimpleTemplate(name='./views/stpl_simple.tpl', lookup=['.'])
self.assertRenders(t, 'start var end\n', var='var')
def test_name(self):
with chdir(__file__):
t = SimpleTemplate(name='stpl_simple', lookup=['./views/'])
self.assertRenders(t, 'start var end\n', var='var')
def test_unicode(self):
self.assertRenders('start {{var}} end', 'start äöü end', var=touni('äöü'))
self.assertRenders('start {{var}} end', 'start äöü end', var=tob('äöü'))
def test_unicode_code(self):
""" Templates: utf8 code in file"""
with chdir(__file__):
t = SimpleTemplate(name='./views/stpl_unicode.tpl', lookup=['.'])
self.assertRenders(t, 'start ñç äöü end\n', var=touni('äöü'))
def test_import(self):
""" Templates: import statement"""
t = '%from base64 import b64encode\nstart {{b64encode(var.encode("ascii") if hasattr(var, "encode") else var)}} end'
self.assertRenders(t, 'start dmFy end', var='var')
def test_data(self):
""" Templates: Data representation """
t = SimpleTemplate('<{{var}}>')
self.assertRenders('<{{var}}>', '<True>', var=True)
self.assertRenders('<{{var}}>', '<False>', var=False)
self.assertRenders('<{{var}}>', '<>', var=None)
self.assertRenders('<{{var}}>', '<0>', var=0)
self.assertRenders('<{{var}}>', '<5>', var=5)
self.assertRenders('<{{var}}>', '<b>', var=tob('b'))
self.assertRenders('<{{var}}>', '<1.0>', var=1.0)
self.assertRenders('<{{var}}>', '<[1, 2]>', var=[1,2])
def test_htmlutils_quote(self):
self.assertEqual('"<' 	"\\>"', html_quote('<\'\r\n\t"\\>'));
def test_escape(self):
self.assertRenders('<{{var}}>', '<b>', var='b')
self.assertRenders('<{{var}}>', '<<&>>',var='<&>')
def test_noescape(self):
self.assertRenders('<{{!var}}>', '<b>', var='b')
self.assertRenders('<{{!var}}>', '<<&>>', var='<&>')
def test_noescape_setting(self):
t = SimpleTemplate('<{{var}}>', noescape=True)
self.assertRenders(t, '<b>', var='b')
self.assertRenders(t, '<<&>>', var='<&>')
t = SimpleTemplate('<{{!var}}>', noescape=True)
self.assertRenders(t, '<b>', var='b')
self.assertRenders(t, '<<&>>', var='<&>')
def test_blocks(self):
""" Templates: Code blocks and loops """
t = "start\n%for i in l:\n{{i}} \n%end\nend"
self.assertRenders(t, 'start\n1 \n2 \n3 \nend', l=[1,2,3])
self.assertRenders(t, 'start\nend', l=[])
t = "start\n%if i:\n{{i}} \n%end\nend"
self.assertRenders(t, 'start\nTrue \nend', i=True)
self.assertRenders(t, 'start\nend', i=False)
def test_elsebug(self):
''' Whirespace between block keyword and colon is allowed '''
self.assertRenders("%if 1:\nyes\n%else:\nno\n%end\n", "yes\n")
self.assertRenders("%if 1:\nyes\n%else :\nno\n%end\n", "yes\n")
def test_commentbug(self):
''' A "#" sign within an string is not a comment '''
self.assertRenders("%if '#':\nyes\n%end\n", "yes\n")
def test_multiline(self):
''' Block statements with non-terminating newlines '''
self.assertRenders("%if 1\\\n%and 1:\nyes\n%end\n", "yes\n")
def test_newline_in_parameterlist(self):
''' Block statements with non-terminating newlines in list '''
self.assertRenders("%a=[1,\n%2]\n{{len(a)}}", "2")
def test_dedentbug(self):
''' One-Line dednet blocks should not change indention '''
t = '%if x: a="if"\n%else: a="else"\n%end\n{{a}}'
self.assertRenders(t, "if", x=True)
self.assertRenders(t, "else", x=False)
t = '%if x:\n%a="if"\n%else: a="else"\n%end\n{{a}}'
self.assertRenders(t, "if", x=True)
self.assertRenders(t, "else", x=False)
t = SimpleTemplate('%if x: a="if"\n%else: a="else"\n%end')
self.assertRaises(NameError, t.render)
def test_onelinebugs(self):
''' One-Line blocks should not change indention '''
t = '%if x:\n%a=1\n%end\n{{a}}'
self.assertRenders(t, "1", x=True)
t = '%if x: a=1; end\n{{a}}'
self.assertRenders(t, "1", x=True)
t = '%if x:\n%a=1\n%else:\n%a=2\n%end\n{{a}}'
self.assertRenders(t, "1", x=True)
self.assertRenders(t, "2", x=False)
t = '%if x: a=1\n%else:\n%a=2\n%end\n{{a}}'
self.assertRenders(t, "1", x=True)
self.assertRenders(t, "2", x=False)
t = '%if x:\n%a=1\n%else: a=2; end\n{{a}}'
self.assertRenders(t, "1", x=True)
self.assertRenders(t, "2", x=False)
t = '%if x: a=1\n%else: a=2; end\n{{a}}'
self.assertRenders(t, "1", x=True)
self.assertRenders(t, "2", x=False)
def test_onelineblocks(self):
""" Templates: one line code blocks """
t = "start\n%a=''\n%for i in l: a += str(i); end\n{{a}}\nend"
self.assertRenders(t, 'start\n123\nend', l=[1,2,3])
self.assertRenders(t, 'start\n\nend', l=[])
def test_escaped_codelines(self):
self.assertRenders('\\% test', '% test')
self.assertRenders('\\%% test', '%% test')
self.assertRenders(' \\% test', ' % test')
def test_nobreak(self):
""" Templates: Nobreak statements"""
self.assertRenders("start\\\\\n%pass\nend", 'startend')
def test_nonobreak(self):
""" Templates: Escaped nobreak statements"""
self.assertRenders("start\\\\\n\\\\\n%pass\nend", 'start\\\\\nend')
def test_include(self):
""" Templates: Include statements"""
with chdir(__file__):
t = SimpleTemplate(name='stpl_include', lookup=['./views/'])
self.assertRenders(t, 'before\nstart var end\nafter\n', var='var')
def test_rebase(self):
""" Templates: %rebase and method passing """
with chdir(__file__):
t = SimpleTemplate(name='stpl_t2main', lookup=['./views/'])
result='+base+\n+main+\n!1234!\n+include+\n-main-\n+include+\n-base-\n'
self.assertRenders(t, result, content='1234')
def test_get(self):
self.assertRenders('{{get("x", "default")}}', '1234', x='1234')
self.assertRenders('{{get("x", "default")}}', 'default')
def test_setdefault(self):
t = '%setdefault("x", "default")\n{{x}}'
self.assertRenders(t, '1234', x='1234')
self.assertRenders(t, 'default')
def test_defnied(self):
self.assertRenders('{{x if defined("x") else "no"}}', 'yes', x='yes')
self.assertRenders('{{x if defined("x") else "no"}}', 'no')
def test_notfound(self):
""" Templates: Unavailable templates"""
self.assertRaises(TemplateError, SimpleTemplate, name="abcdef", lookup=['.'])
def test_error(self):
""" Templates: Exceptions"""
self.assertRaises(SyntaxError, lambda: SimpleTemplate('%for badsyntax').co)
self.assertRaises(IndexError, SimpleTemplate('{{i[5]}}', lookup=['.']).render, i=[0])
def test_winbreaks(self):
""" Templates: Test windows line breaks """
self.assertRenders('%var+=1\r\n{{var}}\r\n', '6\r\n', var=5)
def test_winbreaks_end_bug(self):
d = { 'test': [ 1, 2, 3 ] }
self.assertRenders('%for i in test:\n{{i}}\n%end\n', '1\n2\n3\n', **d)
self.assertRenders('%for i in test:\n{{i}}\r\n%end\n', '1\r\n2\r\n3\r\n', **d)
self.assertRenders('%for i in test:\r\n{{i}}\n%end\r\n', '1\n2\n3\n', **d)
self.assertRenders('%for i in test:\r\n{{i}}\r\n%end\r\n', '1\r\n2\r\n3\r\n', **d)
def test_commentonly(self):
""" Templates: Commentd should behave like code-lines (e.g. flush text-lines) """
t = SimpleTemplate('...\n%#test\n...')
self.assertNotEqual('#test', t.code.splitlines()[0])
def test_template_shortcut(self):
result = template('start {{var}} end', var='middle')
self.assertEqual(touni('start middle end'), result)
def test_view_decorator(self):
@view('start {{var}} end')
def test():
return dict(var='middle')
self.assertEqual(touni('start middle end'), test())
def test_view_decorator_issue_407(self):
with chdir(__file__):
@view('stpl_no_vars')
def test():
pass
self.assertEqual(touni('hihi'), test())
@view('aaa {{x}}', x='bbb')
def test2():
pass
self.assertEqual(touni('aaa bbb'), test2())
def test_global_config(self):
SimpleTemplate.global_config('meh', 1)
t = SimpleTemplate('anything')
self.assertEqual(touni('anything'), t.render())
def test_bug_no_whitespace_before_stmt(self):
self.assertRenders('\n{{var}}', '\nx', var='x')
def test_bug_block_keywords_eat_prefixed_code(self):
''' #595: Everything before an 'if' statement is removed, resulting in
SyntaxError. '''
tpl = "% m = 'x' if True else 'y'\n{{m}}"
self.assertRenders(tpl, 'x')
| TestSimpleTemplate |
python | joke2k__faker | faker/providers/phone_number/es_ES/__init__.py | {
"start": 67,
"end": 2312
} | class ____(PhoneNumberProvider):
"""
According to official specs:
https://avancedigital.mineco.gob.es/es-ES/Servicios/Numeracion/Documents/Guia_Numeracion.pdf
"""
PREFIXES = (
"6##",
"70#",
"71#",
"72#",
"73#",
"74#",
# 75-79 unassigned
"800",
"803",
"806",
"807",
# 801, 802, 804, 805, 808, 809 unassigned
"81#",
"820",
"821",
"822",
"823",
"824",
"825",
"826",
"827",
"828",
# 829 unassigned
"83#",
# 840 unassigned
"841",
"842",
"843",
"844",
"845",
"846",
"847",
"848",
"849",
"85#",
"86#",
# 870 unassigned
"871",
"872",
"873",
"874",
"875",
"876",
"877",
"878",
"879",
"880",
"881",
"882",
"883",
"884",
"885",
"886",
"887",
"888",
# 889-899 unassigned
"900",
"901",
"902",
# 903-909 unassigned
"91#",
"920",
"921",
"922",
"923",
"924",
"925",
"926",
"927",
"928",
# 929 unassigned
"93#",
# 940 unassigned
"941",
"942",
"943",
"944",
"945",
"946",
"947",
"948",
"949",
"95#",
"96#",
# 970 unassigned
"971",
"972",
"973",
"974",
"975",
"976",
"977",
"978",
"979",
"980",
"981",
"982",
"983",
"984",
"985",
"986",
"987",
"988",
# 989-999 unassigned
)
PHONE_FORMATS = (
"+34 xxx ### ###",
"+34 xxx######",
"+34 xxx ## ## ##",
"+34xxx ### ###",
"+34xxx######",
"+34xxx ## ## ##",
)
formats = tuple(
phone_format.replace("xxx", prefix) for (prefix, phone_format) in itertools.product(PREFIXES, PHONE_FORMATS)
)
| Provider |
python | doocs__leetcode | solution/1400-1499/1452.People Whose List of Favorite Companies Is Not a Subset of Another List/Solution.py | {
"start": 0,
"end": 580
} | class ____:
def peopleIndexes(self, favoriteCompanies: List[List[str]]) -> List[int]:
idx = 0
d = {}
n = len(favoriteCompanies)
nums = [set() for _ in range(n)]
for i, ss in enumerate(favoriteCompanies):
for s in ss:
if s not in d:
d[s] = idx
idx += 1
nums[i].add(d[s])
ans = []
for i in range(n):
if not any(i != j and (nums[i] & nums[j]) == nums[i] for j in range(n)):
ans.append(i)
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 700541,
"end": 701011
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of MoveProjectCard"""
__schema__ = github_schema
__field_names__ = ("card_edge", "client_mutation_id")
card_edge = sgqlc.types.Field("ProjectCardEdge", graphql_name="cardEdge")
"""The new edge of the moved card."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| MoveProjectCardPayload |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/packages.py | {
"start": 2392,
"end": 2969
} | class ____(NamedTuple):
provider_id: str
provider_yaml_path: Path
source_date_epoch: int
full_package_name: str
pypi_package_name: str
root_provider_path: Path
base_provider_package_path: Path
documentation_provider_distribution_path: Path
possible_old_provider_paths: list[Path]
changelog_path: Path
provider_description: str
dependencies: list[str]
versions: list[str]
excluded_python_versions: list[str]
plugins: list[PluginInfo]
removed: bool
extra_project_metadata: str | None = None
| ProviderPackageDetails |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis29.py | {
"start": 315,
"end": 1476
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis29.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<a:defRPr"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [145169024, 145176448]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
chart.set_x_axis({"num_font": {"rotation": -90}})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | test/test_overrides.py | {
"start": 39097,
"end": 42934
} | class ____:
"Basic data container that knows how to unwrap itself"
def __init__(self, data):
self.__dict__["_data"] = data
self.__dict__["used_attrs"] = set()
self.__dict__["used_calls"] = set()
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
self.used_attrs.add(name)
val = getattr(self._data, name)
# If it's a method
if not isinstance(val, torch.device) and callable(val):
c = getattr(type(self._data), name)
# Don't append self to args if classmethod/staticmethod
if c is val:
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw))
# Otherwise append self to args
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw))
return wrap(val)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
self.used_attrs.add(name)
setattr(self._data, name, unwrap(value))
def __setitem__(self, key, value):
self._data[unwrap(key)] = unwrap(value)
def __getitem__(self, key):
return wrap(self._data[unwrap(key)])
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
# Find an instance of this class in the arguments
args_of_this_cls = []
for a in args:
if isinstance(a, cls):
args_of_this_cls.append(a)
elif isinstance(a, collections.abc.Sequence):
args_of_this_cls.extend(el for el in a if isinstance(el, cls))
assert len(args_of_this_cls) > 0
for a in args_of_this_cls:
a.used_calls.add(func)
args = unwrap(tuple(args))
kwargs = {k: unwrap(v) for k, v in kwargs.items()}
return wrap(func(*args, **kwargs))
def __add__(self, other):
return self.__torch_function__(torch.add, (Wrapper,), (self, other))
def __mul__(self, other):
return self.__torch_function__(torch.mul, (Wrapper,), (self, other))
def __sub__(self, other):
return self.__torch_function__(torch.sub, (Wrapper,), (self, other))
def __truediv__(self, other):
return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other))
def __floordiv__(self, other):
return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other))
def __ge__(self, other):
return self.__torch_function__(torch.ge, (Wrapper,), (self, other))
def __gt__(self, other):
return self.__torch_function__(torch.gt, (Wrapper,), (self, other))
def __lt__(self, other):
return self.__torch_function__(torch.lt, (Wrapper,), (self, other))
def __le__(self, other):
return self.__torch_function__(torch.le, (Wrapper,), (self, other))
def __eq__(self, other):
return self.__torch_function__(torch.eq, (Wrapper,), (self, other))
def __ne__(self, other):
return self.__torch_function__(torch.ne, (Wrapper,), (self, other))
def __bool__(self):
return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,))
def __int__(self):
return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,))
def __len__(self):
return len(self._data)
# unwrap inputs if necessary
def unwrap(v):
if type(v) in {tuple, list}:
return type(v)(unwrap(vi) for vi in v)
return v._data if isinstance(v, Wrapper) else v
# wrap inputs if necessary
def wrap(v):
if type(v) in {tuple, list}:
return type(v)(wrap(vi) for vi in v)
return Wrapper(v) if isinstance(v, torch.Tensor) else v
| Wrapper |
python | apache__thrift | test/crossrunner/report.py | {
"start": 2367,
"end": 3566
} | class ____(object):
# Unfortunately, standard library doesn't handle timezone well
# DATETIME_FORMAT = '%a %b %d %H:%M:%S %Z %Y'
DATETIME_FORMAT = '%a %b %d %H:%M:%S %Y'
def __init__(self):
self._log = multiprocessing.get_logger()
self._lock = multiprocessing.Lock()
@classmethod
def test_logfile(cls, test_name, prog_kind, dir=None):
relpath = os.path.join('log', '%s_%s.log' % (test_name, prog_kind))
return relpath if not dir else os.path.realpath(os.path.join(dir, relpath))
def _start(self):
self._start_time = time.time()
@property
def _elapsed(self):
return time.time() - self._start_time
@classmethod
def _format_date(cls):
return '%s' % datetime.datetime.now().strftime(cls.DATETIME_FORMAT)
def _print_date(self):
print(self._format_date(), file=self.out)
def _print_bar(self, out=None):
print(
'===============================================================================',
file=(out or self.out))
def _print_exec_time(self):
print('Test execution took {:.1f} seconds.'.format(self._elapsed), file=self.out)
| TestReporter |
python | streamlit__streamlit | lib/tests/streamlit/elements/help_test.py | {
"start": 1221,
"end": 10532
} | class ____(DeltaGeneratorTestCase):
"""Test st.help."""
def test_no_arg(self):
"""When st.help is called with no arguments, show Streamlit docs."""
with patch_varname_getter():
st.help()
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == ""
assert ds.value == "streamlit"
assert ds.type == "module"
assert ds.doc_string.startswith("Streamlit.")
def test_none_arg(self):
"""When st.help is called with None as an argument, don't show Streamlit docs."""
with patch_varname_getter():
st.help(None)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == ""
assert ds.value == "None"
assert ds.type == "NoneType"
import sys
if sys.version_info >= (3, 13):
assert ds.doc_string == "The type of the None singleton."
else:
assert ds.doc_string == ""
def test_basic_func_with_doc(self):
"""Test basic function with docstring."""
def my_func(some_param, another_param=123):
"""This is the doc"""
pass
with patch_varname_getter():
st.help(my_func)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "my_func"
assert ds.value == (
"tests.streamlit.elements.help_test.StHelpTest.test_basic_func_with_doc.<locals>.my_func(some_param, "
"another_param=123)"
)
assert ds.type == "function"
assert ds.doc_string == "This is the doc"
def test_basic_func_without_doc(self):
"""Test basic function without docstring."""
def my_func(some_param, another_param=123):
pass
with patch_varname_getter():
st.help(my_func)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "my_func"
assert ds.value == (
"tests.streamlit.elements.help_test.StHelpTest.test_basic_func_without_doc.<locals>.my_func(some_param, "
"another_param=123)"
)
assert ds.type == "function"
assert ds.doc_string == ""
def test_deltagenerator_func(self):
"""Test Streamlit DeltaGenerator function."""
with patch_varname_getter():
st.help(st.audio)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "st.audio"
assert ds.type == "method"
signature = (
"(data: 'MediaData', format: 'str' = 'audio/wav', start_time: 'MediaTime' = 0, *, "
"sample_rate: 'int | None' = None, end_time: 'MediaTime | None' = None, loop: 'bool' = False, "
"autoplay: 'bool' = False, width: 'WidthWithoutContent' = 'stretch') -> 'DeltaGenerator'"
)
assert f"streamlit.delta_generator.MediaMixin.audio{signature}" == ds.value
assert ds.doc_string.startswith("Display an audio player")
def test_builtin_func(self):
"""Test a built-in function."""
with patch_varname_getter():
st.help(dir)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "dir"
assert ds.value == "builtins.dir(...)"
assert ds.type == "builtin_function_or_method"
assert len(ds.doc_string) > 0
def test_varname(self):
"""Test a named variable."""
myvar = 123
with patch_varname_getter():
st.help(myvar)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "myvar"
assert ds.value == "123"
assert ds.type == "int"
assert len(ds.doc_string) > 0
def test_walrus(self):
"""Test a named variable using walrus operator."""
with patch_varname_getter():
st.help(myvar := 123) # noqa: F841
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "myvar"
assert ds.value == "123"
assert ds.type == "int"
assert len(ds.doc_string) > 0
def test_complex_var(self):
"""Test complex dict-list-object combination."""
myvar = {"foo": [None, {"bar": "baz"}]}
with patch_varname_getter():
st.help(myvar["foo"][1]["bar"].strip)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == 'myvar["foo"][1]["bar"].strip'
assert ds.value == r"str.strip(chars=None, /)"
assert ds.type == "builtin_function_or_method"
assert len(ds.doc_string) > 0
def test_builtin_obj(self):
"""Test a built-in function."""
with patch_varname_getter():
st.help(123)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == ""
assert ds.value == "123"
assert ds.type == "int"
assert len(ds.doc_string) > 0
def test_doc_defined_for_type(self):
"""When the docs are defined for the type on an object, but not
the object, we expect the docs of the type. This is the case
of ndarray generated as follow.
"""
array = np.arange(1)
with patch_varname_getter():
st.help(array)
ds = self.get_delta_from_queue().new_element.doc_string
assert ds.name == "array"
assert ds.value == "array([0])"
assert ds.type == "ndarray"
assert "ndarray" in ds.doc_string
def test_passing_a_class(self):
"""When the object is a class and no docs are defined,
we expect docs to be None."""
class MyClass:
pass
with patch_varname_getter():
st.help(MyClass)
ds = self.get_delta_from_queue().new_element.doc_string
assert type(MyClass) is type
assert ds.name == "MyClass"
assert (
ds.value
== "tests.streamlit.elements.help_test.StHelpTest.test_passing_a_class.<locals>.MyClass()"
)
assert ds.type == "class"
assert ds.doc_string == ""
def test_passing_an_instance(self):
"""When the type of the object is type and no docs are defined,
we expect docs to be None."""
class MyClass:
pass
with patch_varname_getter():
st.help(MyClass)
ds = self.get_delta_from_queue().new_element.doc_string
assert type(MyClass) is type
assert ds.name == "MyClass"
assert (
ds.value
== "tests.streamlit.elements.help_test.StHelpTest.test_passing_an_instance.<locals>.MyClass()"
)
assert ds.type == "class"
assert ds.doc_string == ""
def test_class_members(self):
class MyClass:
a = 1
b = 2
def __init__(self):
self.c = 3
self.d = 4
@property
def e(self):
"Property e"
return 5
@staticmethod
def staticmethod1(x=10):
"Static method 1"
@classmethod
def classmethod1(self, y=20):
"Class method 1"
with patch_varname_getter():
st.help(MyClass)
ds = self.get_delta_from_queue().new_element.doc_string
assert len(ds.members) == 5
expected_outputs = [
("a", "1", "", "int"),
("b", "2", "", "int"),
("e", "", "Property e", "property"),
("classmethod1", "", "Class method 1", "method"),
("staticmethod1", "", "Static method 1", "function"),
]
for i, expected in enumerate(expected_outputs):
assert ds.members[i].name == expected[0]
assert ds.members[i].value == expected[1]
assert ds.members[i].doc_string == expected[2]
assert ds.members[i].type == expected[3]
def test_instance_members(self):
class MyClass:
a = 1
b = 2
def __init__(self):
self.c = 3
self.d = 4
@property
def e(self):
"Property e"
return 5
@staticmethod
def staticmethod1(x=10):
"Static method 1"
@classmethod
def classmethod1(self, y=20):
"Class method 1"
my_instance = MyClass()
with patch_varname_getter():
st.help(my_instance)
ds = self.get_delta_from_queue().new_element.doc_string
assert len(ds.members) == 7
expected_outputs = [
("a", "1", "", "int"),
("b", "2", "", "int"),
("c", "3", "", "int"),
("d", "4", "", "int"),
("e", "", "Property e", "property"),
("classmethod1", "", "Class method 1", "method"),
("staticmethod1", "", "Static method 1", "function"),
]
for i, expected in enumerate(expected_outputs):
assert ds.members[i].name == expected[0]
assert ds.members[i].value == expected[1]
assert ds.members[i].doc_string == expected[2]
assert ds.members[i].type == expected[3]
st_calls = [
"st.help({0})",
"st.write({0})",
]
| StHelpTest |
python | pytransitions__transitions | transitions/extensions/diagrams_mermaid.py | {
"start": 5471,
"end": 9593
} | class ____(Graph):
"""Graph creation support for transitions.extensions.nested.HierarchicalGraphMachine."""
def __init__(self, *args, **kwargs):
self._cluster_states = []
super(NestedGraph, self).__init__(*args, **kwargs)
def set_node_style(self, state, style):
for state_name in self._get_state_names(state):
super(NestedGraph, self).set_node_style(state_name, style)
def set_previous_transition(self, src, dst):
self.custom_styles["edge"][src][dst] = "previous"
self.set_node_style(src, "previous")
def _add_nodes(self, states, container):
self._add_nested_nodes(states, container, prefix="", default_style="default")
def _add_nested_nodes(self, states, container, prefix, default_style):
for state in states:
name = prefix + state["name"]
container.append("state \"{}\" as {}".format(self._convert_state_attributes(state), name))
if state.get("final", False):
container.append("{} --> [*]".format(name))
if not prefix:
container.append("Class {} s_{}".format(name.replace(" ", ""),
self.custom_styles["node"][name] or default_style))
if state.get("children", None) is not None:
container.append("state {} {{".format(name))
self._cluster_states.append(name)
# with container.subgraph(name=cluster_name, graph_attr=attr) as sub:
initial = state.get("initial", "")
is_parallel = isinstance(initial, list)
if is_parallel:
for child in state["children"]:
self._add_nested_nodes(
[child],
container,
default_style="parallel",
prefix=prefix + state["name"] + self.machine.state_cls.separator,
)
container.append("--")
if state["children"]:
container.pop()
else:
if initial:
container.append("[*] --> {}".format(
prefix + state["name"] + self.machine.state_cls.separator + initial))
self._add_nested_nodes(
state["children"],
container,
default_style="default",
prefix=prefix + state["name"] + self.machine.state_cls.separator,
)
container.append("}")
def _add_edges(self, transitions, container):
edges_attr = defaultdict(lambda: defaultdict(dict))
for transition in transitions:
# enable customizable labels
src = transition["source"]
dst = transition.get("dest", src)
if edges_attr[src][dst]:
attr = edges_attr[src][dst]
attr["label"] = " | ".join(
[edges_attr[src][dst]["label"], self._transition_label(transition)]
)
else:
edges_attr[src][dst] = self._create_edge_attr(src, dst, transition)
for custom_src, dests in self.custom_styles["edge"].items():
for custom_dst, style in dests.items():
if style and (
custom_src not in edges_attr or custom_dst not in edges_attr[custom_src]
):
edges_attr[custom_src][custom_dst] = self._create_edge_attr(
custom_src, custom_dst, {"trigger": "", "dest": ""}
)
for src, dests in edges_attr.items():
for dst, attr in dests.items():
if not attr["label"]:
continue
container.append("{source} --> {dest}: {label}".format(**attr))
def _create_edge_attr(self, src, dst, transition):
return {"source": src, "dest": dst, "label": self._transition_label(transition)}
| NestedGraph |
python | Textualize__rich | rich/_log_render.py | {
"start": 308,
"end": 3213
} | class ____:
def __init__(
self,
show_time: bool = True,
show_level: bool = False,
show_path: bool = True,
time_format: Union[str, FormatTimeCallable] = "[%x %X]",
omit_repeated_times: bool = True,
level_width: Optional[int] = 8,
) -> None:
self.show_time = show_time
self.show_level = show_level
self.show_path = show_path
self.time_format = time_format
self.omit_repeated_times = omit_repeated_times
self.level_width = level_width
self._last_time: Optional[Text] = None
def __call__(
self,
console: "Console",
renderables: Iterable["ConsoleRenderable"],
log_time: Optional[datetime] = None,
time_format: Optional[Union[str, FormatTimeCallable]] = None,
level: TextType = "",
path: Optional[str] = None,
line_no: Optional[int] = None,
link_path: Optional[str] = None,
) -> "Table":
from .containers import Renderables
from .table import Table
output = Table.grid(padding=(0, 1))
output.expand = True
if self.show_time:
output.add_column(style="log.time")
if self.show_level:
output.add_column(style="log.level", width=self.level_width)
output.add_column(ratio=1, style="log.message", overflow="fold")
if self.show_path and path:
output.add_column(style="log.path")
row: List["RenderableType"] = []
if self.show_time:
log_time = log_time or console.get_datetime()
time_format = time_format or self.time_format
if callable(time_format):
log_time_display = time_format(log_time)
else:
log_time_display = Text(log_time.strftime(time_format))
if log_time_display == self._last_time and self.omit_repeated_times:
row.append(Text(" " * len(log_time_display)))
else:
row.append(log_time_display)
self._last_time = log_time_display
if self.show_level:
row.append(level)
row.append(Renderables(renderables))
if self.show_path and path:
path_text = Text()
path_text.append(
path, style=f"link file://{link_path}" if link_path else ""
)
if line_no:
path_text.append(":")
path_text.append(
f"{line_no}",
style=f"link file://{link_path}#{line_no}" if link_path else "",
)
row.append(path_text)
output.add_row(*row)
return output
if __name__ == "__main__": # pragma: no cover
from rich.console import Console
c = Console()
c.print("[on blue]Hello", justify="right")
c.log("[on blue]hello", justify="right")
| LogRender |
python | wandb__wandb | wandb/sdk/artifacts/_generated/input_types.py | {
"start": 2288,
"end": 2589
} | class ____(GQLInput):
artifact_sequence_id: GQLId = Field(alias="artifactSequenceID")
name: Optional[str] = Field(default=None, max_length=128)
description: Optional[str] = None
client_mutation_id: Optional[str] = Field(alias="clientMutationId", default=None)
| UpdateArtifactSequenceInput |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_model_root.py | {
"start": 593,
"end": 782
} | class ____:
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
root: str
def __init__(self, data):
self.root = data
| RootModel |
python | huggingface__transformers | src/transformers/models/eomt/modeling_eomt.py | {
"start": 15574,
"end": 30355
} | class ____(nn.Module):
def __init__(self, config: EomtConfig, weight_dict: dict[str, float]):
"""
The Eomt Loss. The loss is computed very similar to DETR. The process happens in two steps: 1) we
compute hungarian assignment between ground truth masks and the outputs of the model 2) we supervise each pair
of matched ground-truth / prediction (supervise class and mask)
Args:
config (`EomtConfig`):
The configuration for Eomt model also containing loss calculation specific parameters.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
"""
super().__init__()
requires_backends(self, ["scipy"])
self.num_labels = config.num_labels
self.weight_dict = weight_dict
# Weight to apply to the null class
self.eos_coef = config.no_object_weight
empty_weight = torch.ones(self.num_labels + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = config.train_num_points
self.oversample_ratio = config.oversample_ratio
self.importance_sample_ratio = config.importance_sample_ratio
self.matcher = EomtHungarianMatcher(
cost_class=config.class_weight,
cost_dice=config.dice_weight,
cost_mask=config.mask_weight,
num_points=self.num_points,
)
def _max_by_axis(self, sizes: list[list[int]]) -> list[int]:
maxes = sizes[0]
for sublist in sizes[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
# Adapted from nested_tensor_from_tensor_list() in original implementation
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
# get the maximum size in the batch
max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors])
# compute final size
batch_shape = [len(tensors)] + max_size
batch_size, _, height, width = batch_shape
dtype = tensors[0].dtype
device = tensors[0].device
padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device)
padding_masks = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
# pad the tensors to the size of the biggest one
for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks):
padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor)
padding_mask[: tensor.shape[1], : tensor.shape[2]] = False
return padded_tensors, padding_masks
def loss_labels(
self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]
) -> dict[str, Tensor]:
"""Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
"""
pred_logits = class_queries_logits
batch_size, num_queries, _ = pred_logits.shape
criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
idx = self._get_predictions_permutation_indices(indices) # shape of (batch_size, num_queries)
target_classes_o = torch.cat(
[target[j] for target, (_, j) in zip(class_labels, indices)]
) # shape of (batch_size, num_queries)
target_classes = torch.full(
(batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device
)
target_classes[idx] = target_classes_o
# Permute target_classes (batch_size, num_queries, num_labels) -> (batch_size, num_labels, num_queries)
pred_logits_transposed = pred_logits.transpose(1, 2)
loss_ce = criterion(pred_logits_transposed, target_classes)
losses = {"loss_cross_entropy": loss_ce}
return losses
def loss_masks(
self,
masks_queries_logits: torch.Tensor,
mask_labels: list[torch.Tensor],
indices: tuple[np.array],
num_masks: int,
) -> dict[str, torch.Tensor]:
"""Compute the losses related to the masks using sigmoid_cross_entropy_loss and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
num_masks (`int)`:
The number of masks, used for normalization.
Returns:
losses (`dict[str, Tensor]`): A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid cross entropy loss on the predicted and ground truth.
masks.
- **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth,
masks.
"""
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
# shape (batch_size * num_queries, height, width)
pred_masks = masks_queries_logits[src_idx]
# shape (batch_size, num_queries, height, width)
# pad all and stack the targets to the num_labels dimension
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
# No need to upsample predictions as we are using normalized coordinates
pred_masks = pred_masks[:, None]
target_masks = target_masks[:, None]
# Sample point coordinates
with torch.no_grad():
point_coordinates = self.sample_points_using_uncertainty(
pred_masks,
lambda logits: self.calculate_uncertainty(logits),
self.num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
point_labels = sample_point(target_masks, point_coordinates, align_corners=False).squeeze(1)
point_logits = sample_point(pred_masks, point_coordinates, align_corners=False).squeeze(1)
losses = {
"loss_mask": sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks),
"loss_dice": dice_loss(point_logits, point_labels, num_masks),
}
del pred_masks
del target_masks
return losses
def _get_predictions_permutation_indices(self, indices):
# Permute predictions following indices
batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
predictions_indices = torch.cat([src for (src, _) in indices])
return batch_indices, predictions_indices
def _get_targets_permutation_indices(self, indices):
# Permute labels following indices
batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
target_indices = torch.cat([tgt for (_, tgt) in indices])
return batch_indices, target_indices
def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
"""
In Eomt paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits'
for the foreground class in `classes`.
Args:
logits (`torch.Tensor`):
A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is:
the number of foreground classes. The values are logits.
Returns:
scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
uncertain locations having the highest uncertainty score.
"""
uncertainty_scores = -(torch.abs(logits))
return uncertainty_scores
def sample_points_using_uncertainty(
self,
logits: torch.Tensor,
uncertainty_function,
num_points: int,
oversample_ratio: int,
importance_sample_ratio: float,
) -> torch.Tensor:
"""
This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit
prediction as input.
Args:
logits (`float`):
Logit predictions for P points.
uncertainty_function:
A function that takes logit predictions for P points and returns their uncertainties.
num_points (`int`):
The number of points P to sample.
oversample_ratio (`int`):
Oversampling parameter.
importance_sample_ratio (`float`):
Ratio of points that are sampled via importance sampling.
Returns:
point_coordinates (`torch.Tensor`):
Coordinates for P sampled points.
"""
num_boxes = logits.shape[0]
num_points_sampled = int(num_points * oversample_ratio)
# Get random point coordinates
point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device)
# Get sampled prediction value for the point coordinates
point_logits = sample_point(logits, point_coordinates, align_corners=False)
# Calculate the uncertainties based on the sampled prediction values of the points
point_uncertainties = uncertainty_function(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device)
idx += shift[:, None]
point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2)
if num_random_points > 0:
point_coordinates = torch.cat(
[point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)],
dim=1,
)
return point_coordinates
def forward(
self,
masks_queries_logits: torch.Tensor,
class_queries_logits: torch.Tensor,
mask_labels: list[torch.Tensor],
class_labels: list[torch.Tensor],
auxiliary_predictions: Optional[dict[str, torch.Tensor]] = None,
) -> dict[str, torch.Tensor]:
"""
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
class_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, num_labels)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
if `use_auxiliary_loss` was set to `true` in [`EomtConfig`], then it contains the logits from
the inner layers of the EomtMaskedAttentionDecoder.
Returns:
losses (`dict[str, Tensor]`): A dict of `torch.Tensor` containing three keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid cross_entropy loss on the predicted and ground truth
masks.
- **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth
masks.
if `use_auxiliary_loss` was set to `true` in [`EomtConfig`], the dictionary contains additional
losses for each auxiliary predictions.
"""
# retrieve the matching between the outputs of the last layer and the labels
indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
# compute the average number of target masks for normalization purposes
num_masks = self.get_num_masks(class_labels, device=class_labels[0].device)
# get all the losses
losses: dict[str, Tensor] = {
**self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks),
**self.loss_labels(class_queries_logits, class_labels, indices),
}
# in case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if auxiliary_predictions is not None:
for idx, aux_outputs in enumerate(auxiliary_predictions):
masks_queries_logits = aux_outputs["masks_queries_logits"]
class_queries_logits = aux_outputs["class_queries_logits"]
loss_dict = self.forward(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()}
losses.update(loss_dict)
return losses
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
"""
Computes the average number of target masks across the batch, for normalization purposes.
"""
num_masks = sum(len(classes) for classes in class_labels)
num_masks = torch.as_tensor(num_masks, dtype=torch.float, device=device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_masks = reduce(num_masks)
world_size = PartialState().num_processes
num_masks = torch.clamp(num_masks / world_size, min=1)
return num_masks
| EomtLoss |
python | falconry__falcon | tests/test_cookies.py | {
"start": 1992,
"end": 2250
} | class ____:
def on_get(self, req, resp):
resp.set_cookie('foo', 'bar', secure=True, partitioned=True)
resp.set_cookie('bar', 'baz', secure=True, partitioned=False)
resp.set_cookie('baz', 'foo', secure=True)
| CookieResourcePartitioned |
python | getsentry__sentry | src/sentry/auth/helper.py | {
"start": 26929,
"end": 39791
} | class ____(Pipeline[AuthProvider, AuthHelperSessionStore]):
"""
Helper class which is passed into AuthView's.
Designed to link provider and views as well as manage the state and
pipeline. The pipeline is optional, in case can be done
Auth has several flows:
1. The user is going through provider setup, thus enforcing that they link
their current account to the new auth identity.
2. The user is anonymous and creating a brand new account.
3. The user is anonymous and logging into an existing account.
4. The user is anonymous and creating a brand new account, but may have an
existing account that should/could be merged.
5. The user is authenticated and creating a new identity, thus associating
it with their current account.
6. The user is authenticated and creating a new identity, but not linking
it with their account (thus creating a new account).
"""
pipeline_name = "pipeline"
provider_model_cls = AuthProvider
session_store_cls = AuthHelperSessionStore
@classmethod
def get_for_request(cls, request: HttpRequest) -> AuthHelper | None:
req_state = cls.unpack_state(request)
if not req_state:
return None
if not req_state.organization:
logging.info("Invalid SSO data found")
return None
# NOTE: pulling custom pipeline state (see get_initial_state)
flow = req_state.state.flow
referrer = req_state.state.referrer
return cls(
auth_provider=req_state.provider_model,
flow=flow,
organization=req_state.organization,
provider_key=req_state.provider_key,
referrer=referrer,
request=request,
)
def __init__(
self,
request: HttpRequest,
organization: RpcOrganization,
flow: int,
auth_provider: AuthProvider | None = None,
provider_key: str | None = None,
referrer: str | None = "in-app",
) -> None:
assert provider_key or auth_provider
self.flow = flow
self.referrer = referrer
# TODO: Resolve inconsistency with nullable provider_key.
# Tagging with "type: ignore" because the superclass requires provider_key to
# be non-nullable. We get away with it because super().__init__ only passes
# provider_key to get_provider, and our get_provider override accepts a null
# provider_key. But it technically violates the type contract and we'll need
# to change the superclass to accommodate this one.
super().__init__(request, provider_key, organization, auth_provider) # type: ignore[arg-type]
# Override superclass's type hints to be narrower
self.organization: RpcOrganization = self.organization
def _get_provider(self, provider_key: str | None) -> Provider:
if self.provider_model:
return self.provider_model.get_provider()
elif provider_key:
return manager.get(provider_key)
else:
raise NotImplementedError
@cached_property
def provider(self) -> Provider:
ret = self._get_provider(self._provider_key)
ret.set_pipeline(self)
ret.update_config(self.config)
return ret
def get_pipeline_views(self) -> Sequence[AuthView]:
assert isinstance(self.provider, Provider)
if self.flow == FLOW_LOGIN:
return self.provider.get_auth_pipeline()
elif self.flow == FLOW_SETUP_PROVIDER:
return self.provider.get_setup_pipeline()
else:
raise NotImplementedError
def is_valid(self) -> bool:
return super().is_valid() and self.state.flow in (FLOW_LOGIN, FLOW_SETUP_PROVIDER)
def get_initial_state(self) -> Mapping[str, Any]:
state = dict(super().get_initial_state())
state.update({"flow": self.flow, "referrer": self.referrer})
return state
def finish_pipeline(self) -> HttpResponseBase:
data = self.fetch_state()
# The state data may have expired, in which case the state data will
# simply be None.
if not data:
return self.error(ERR_INVALID_IDENTITY)
try:
identity = self.provider.build_identity(data)
except IdentityNotValid as error:
return self.error(str(error) or ERR_INVALID_IDENTITY)
if self.state.flow == FLOW_LOGIN:
# create identity and authenticate the user
response = self._finish_login_pipeline(identity)
elif self.state.flow == FLOW_SETUP_PROVIDER:
# Configuring the SSO Auth provider
response = self._finish_setup_pipeline(identity)
else:
raise Exception(f"Unrecognized flow value: {self.state.flow}")
return response
def auth_handler(self, identity: Mapping[str, Any]) -> AuthIdentityHandler:
assert self.provider_model is not None
return AuthIdentityHandler(
auth_provider=self.provider_model,
provider=self.provider,
organization=self.organization,
request=self.request,
identity=identity,
referrer=self.referrer,
)
def _finish_login_pipeline(self, identity: Mapping[str, Any]) -> HttpResponse:
"""
The login flow executes both with anonymous and authenticated users.
Upon completion a few branches exist:
If the identity is already linked, the user should be logged in
and redirected immediately.
Otherwise, the user is presented with a confirmation window. That window
will show them the new account that will be created, and if they're
already authenticated an optional button to associate the identity with
their account.
"""
auth_provider = self.provider_model
assert auth_provider is not None
user_id = identity["id"]
lock = locks.get(
f"sso:auth:{auth_provider.id}:{md5_text(user_id).hexdigest()}",
duration=5,
name="sso_auth",
)
with TimedRetryPolicy(5)(lock.acquire):
try:
auth_identity = AuthIdentity.objects.select_related("user").get(
auth_provider=auth_provider, ident=user_id
)
except AuthIdentity.DoesNotExist:
auth_identity = None
# Handle migration of identity keys
# Context - when google oauth was initially created, the auth_identity key was simply
# the provider email. This can cause issues if the customer changes their domain name,
# and now their email is different and they're locked out of their account.
# This logic updates their id to the provider id instead.
if not auth_identity and isinstance(user_id, MigratingIdentityId):
try:
auth_identity = AuthIdentity.objects.select_related("user").get(
auth_provider=auth_provider, ident=user_id.legacy_id
)
auth_identity.update(ident=user_id.id)
except AuthIdentity.DoesNotExist:
auth_identity = None
auth_handler = self.auth_handler(identity)
if not auth_identity:
return auth_handler.handle_unknown_identity(self.state)
# If the User attached to this AuthIdentity is not active,
# we want to clobber the old account and take it over, rather than
# getting logged into the inactive account.
if not auth_identity.user.is_active:
# Current user is also not logged in, so we have to
# assume unknown.
if not self.request.user.is_authenticated:
return auth_handler.handle_unknown_identity(self.state)
auth_identity = auth_handler.handle_attach_identity()
return auth_handler.handle_existing_identity(self.state, auth_identity)
def _finish_setup_pipeline(self, identity: Mapping[str, Any]) -> HttpResponseRedirect:
"""
the setup flow here is configuring SSO for an organization.
It does that by creating the auth provider as well as an OrgMember identity linked to the active user
"""
request = self.request
if not request.user.is_authenticated:
return self.error(ERR_NOT_AUTHED)
if request.user.id != self.state.uid:
return self.error(ERR_UID_MISMATCH)
data = self.fetch_state()
config = self.provider.build_config(state=data)
om = organization_service.check_membership_by_id(
organization_id=self.organization.id, user_id=request.user.id
)
if om is None:
return self.error(ERR_UID_MISMATCH)
# disable require 2FA for the organization
# since only SSO or require 2FA can be enabled
self.disable_2fa_required()
self.provider_model = AuthProvider.objects.create(
organization_id=self.organization.id, provider=self.provider.key, config=config
)
self.auth_handler(identity).handle_attach_identity(om)
auth.mark_sso_complete(request, self.organization.id)
organization_service.schedule_signal(
sso_enabled,
organization_id=self.organization.id,
args=dict(
user_id=request.user.id,
provider=self.provider.key,
),
)
log_service.record_audit_log(
event=AuditLogEvent(
organization_id=self.organization.id,
actor_user_id=request.user.id,
ip_address=request.META["REMOTE_ADDR"],
target_object_id=self.provider_model.id,
event_id=audit_log.get_event_id("SSO_ENABLE"),
data=self.provider_model.get_audit_log_data(),
)
)
email_missing_links_control.delay(self.organization.id, request.user.id, self.provider.key)
messages.add_message(self.request, messages.SUCCESS, OK_SETUP_SSO)
self.clear_session()
next_uri = reverse(
"sentry-organization-auth-provider-settings", args=[self.organization.slug]
)
return HttpResponseRedirect(next_uri)
def error(self, message: str | _StrPromise) -> HttpResponseRedirect:
redirect_uri = "/"
if self.state.flow == FLOW_LOGIN:
# create identity and authenticate the user
redirect_uri = reverse("sentry-auth-organization", args=[self.organization.slug])
elif self.state.flow == FLOW_SETUP_PROVIDER:
redirect_uri = reverse(
"sentry-organization-auth-settings", args=[self.organization.slug]
)
if redirect_uri == "/":
metrics.incr(
"sso.error",
tags={
"flow": self.state.flow,
"provider": self.provider.key,
},
skip_internal=False,
sample_rate=1.0,
)
else:
metrics.incr(
"sso.exit",
tags={
"flow": self.state.flow,
"provider": self.provider.key,
},
skip_internal=False,
sample_rate=1.0,
)
# NOTE: Does NOT necessarily indicate a login _failure_
logger.warning(
"sso.login-pipeline.error",
extra={
"flow": self.state.flow,
"provider": self.provider.key,
"error_message": message,
"organization_id": self.organization.id if self.organization else None,
},
)
messages.add_message(self.request, messages.ERROR, f"Authentication error: {message}")
return HttpResponseRedirect(redirect_uri)
def disable_2fa_required(self) -> None:
require_2fa = self.organization.flags.require_2fa
if not require_2fa:
return
organization_service.update_flags(
organization_id=self.organization.id,
flags=RpcOrganizationFlagsUpdate(require_2fa=False),
)
logger.info(
"Require 2fa disabled during sso setup", extra={"organization_id": self.organization.id}
)
create_audit_entry(
request=self.request,
organization=self.organization,
target_object=self.organization.id,
event=audit_log.get_event_id("ORG_EDIT"),
data={"require_2fa": "to False when enabling SSO"},
)
CHANNEL_PROVIDER_MAP = {ChannelName.FLY_IO.value: FlyOAuth2Provider}
| AuthHelper |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_organization_uptime_alert_index.py | {
"start": 232,
"end": 377
} | class ____(UptimeAlertBaseEndpointTest):
endpoint = "sentry-api-0-organization-uptime-alert-index"
| OrganizationUptimeAlertIndexBaseEndpointTest |
python | kamyu104__LeetCode-Solutions | Python/missing-number.py | {
"start": 47,
"end": 291
} | class ____(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return reduce(operator.xor, nums,
reduce(operator.xor, xrange(len(nums) + 1)))
| Solution |
python | Textualize__textual | src/textual/demo/home.py | {
"start": 7100,
"end": 8414
} | class ____(PageScreen):
DEFAULT_CSS = """
HomeScreen {
Content {
align-horizontal: center;
& > * {
max-width: 100;
}
margin: 0 1;
overflow-y: auto;
height: 1fr;
scrollbar-gutter: stable;
MarkdownFence {
height: auto;
max-height: initial;
}
Collapsible {
padding-right: 0;
&.-collapsed { padding-bottom: 1; }
}
Markdown {
margin-right: 1;
padding-right: 1;
background: transparent;
}
}
}
"""
def compose(self) -> ComposeResult:
yield StarCount()
with Content():
yield Markdown(WHAT_IS_TEXTUAL_MD)
with Collapsible(title="Welcome", collapsed=False):
yield Markdown(WELCOME_MD)
with Collapsible(title="Textual Interfaces"):
yield Markdown(ABOUT_MD)
with Collapsible(title="Textual API"):
yield Markdown(API_MD)
with Collapsible(title="Deploying Textual apps"):
yield Markdown(DEPLOY_MD)
yield Footer()
| HomeScreen |
python | apache__avro | lang/py/avro/schema.py | {
"start": 23888,
"end": 25822
} | class ____(EqualByJsonMixin, Schema):
def __init__(self, items, names=None, other_props=None, validate_names: bool = True):
# Call parent ctor
Schema.__init__(self, "array", other_props, validate_names=validate_names)
# Add class members
if isinstance(items, str) and names.has_name(items, None):
items_schema = names.get_name(items, None)
else:
try:
items_schema = make_avsc_object(items, names, validate_names=self.validate_names)
except avro.errors.SchemaParseException as e:
fail_msg = f"Items schema ({items}) not a valid Avro schema: {e} (known names: {names.names.keys()})"
raise avro.errors.SchemaParseException(fail_msg)
self.set_prop("items", items_schema)
# read-only properties
@property
def items(self):
return self.get_prop("items")
def match(self, writer):
"""Return True if the current schema (as reader) matches the writer schema.
@arg writer: the schema to match against
@return bool
"""
return self.type == writer.type and self.items.check_props(writer.items, ["type"])
def to_json(self, names=None):
names = names or Names(validate_names=self.validate_names)
to_dump = self.props.copy()
item_schema = self.get_prop("items")
to_dump["items"] = item_schema.to_json(names)
return to_dump
def to_canonical_json(self, names=None):
names = names or Names(validate_names=self.validate_names)
to_dump = self.canonical_properties
item_schema = self.get_prop("items")
to_dump["items"] = item_schema.to_canonical_json(names)
return to_dump
def validate(self, datum):
"""Return self if datum is a valid representation of this schema, else None."""
return self if isinstance(datum, list) else None
| ArraySchema |
python | networkx__networkx | networkx/readwrite/gexf.py | {
"start": 6080,
"end": 9329
} | class ____:
versions = {
"1.1draft": {
"NS_GEXF": "http://www.gexf.net/1.1draft",
"NS_VIZ": "http://www.gexf.net/1.1draft/viz",
"NS_XSI": "http://www.w3.org/2001/XMLSchema-instance",
"SCHEMALOCATION": " ".join(
[
"http://www.gexf.net/1.1draft",
"http://www.gexf.net/1.1draft/gexf.xsd",
]
),
"VERSION": "1.1",
},
"1.2draft": {
"NS_GEXF": "http://www.gexf.net/1.2draft",
"NS_VIZ": "http://www.gexf.net/1.2draft/viz",
"NS_XSI": "http://www.w3.org/2001/XMLSchema-instance",
"SCHEMALOCATION": " ".join(
[
"http://www.gexf.net/1.2draft",
"http://www.gexf.net/1.2draft/gexf.xsd",
]
),
"VERSION": "1.2",
},
"1.3": {
"NS_GEXF": "http://gexf.net/1.3",
"NS_VIZ": "http://gexf.net/1.3/viz",
"NS_XSI": "http://w3.org/2001/XMLSchema-instance",
"SCHEMALOCATION": " ".join(
[
"http://gexf.net/1.3",
"http://gexf.net/1.3/gexf.xsd",
]
),
"VERSION": "1.3",
},
}
def construct_types(self):
types = [
(int, "integer"),
(float, "float"),
(float, "double"),
(bool, "boolean"),
(list, "string"),
(dict, "string"),
(int, "long"),
(str, "liststring"),
(str, "anyURI"),
(str, "string"),
]
# These additions to types allow writing numpy types
try:
import numpy as np
except ImportError:
pass
else:
# prepend so that python types are created upon read (last entry wins)
types = [
(np.float64, "float"),
(np.float32, "float"),
(np.float16, "float"),
(np.int_, "int"),
(np.int8, "int"),
(np.int16, "int"),
(np.int32, "int"),
(np.int64, "int"),
(np.uint8, "int"),
(np.uint16, "int"),
(np.uint32, "int"),
(np.uint64, "int"),
(np.int_, "int"),
(np.intc, "int"),
(np.intp, "int"),
] + types
self.xml_type = dict(types)
self.python_type = dict(reversed(a) for a in types)
# http://www.w3.org/TR/xmlschema-2/#boolean
convert_bool = {
"true": True,
"false": False,
"True": True,
"False": False,
"0": False,
0: False,
"1": True,
1: True,
}
def set_version(self, version):
d = self.versions.get(version)
if d is None:
raise nx.NetworkXError(f"Unknown GEXF version {version}.")
self.NS_GEXF = d["NS_GEXF"]
self.NS_VIZ = d["NS_VIZ"]
self.NS_XSI = d["NS_XSI"]
self.SCHEMALOCATION = d["SCHEMALOCATION"]
self.VERSION = d["VERSION"]
self.version = version
| GEXF |
python | numba__numba | numba/core/ir.py | {
"start": 27925,
"end": 28411
} | class ____(Stmt):
"""
Print some values.
"""
def __init__(self, args, vararg, loc):
assert all(isinstance(x, Var) for x in args)
assert vararg is None or isinstance(vararg, Var)
assert isinstance(loc, Loc)
self.args = tuple(args)
self.vararg = vararg
# Constant-inferred arguments
self.consts = {}
self.loc = loc
def __str__(self):
return 'print(%s)' % ', '.join(str(v) for v in self.args)
| Print |
python | kamyu104__LeetCode-Solutions | Python/tree-diameter.py | {
"start": 2892,
"end": 3754
} | class ____(object):
def treeDiameter(self, edges):
"""
:type edges: List[List[int]]
:rtype: int
"""
def bfs(root):
d = new_root = -1
lookup = [False]*len(adj)
lookup[root] = True
q = [root]
while q:
d, new_root = d+1, q[0]
new_q = []
for u in q:
for v in adj[u]:
if lookup[v]:
continue
lookup[v] = True
new_q.append(v)
q = new_q
return d, new_root
adj = [[] for _ in range(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
_, root = bfs(0)
d, _ = bfs(root)
return d
| Solution4 |
python | pydata__xarray | xarray/util/generate_aggregations.py | {
"start": 8151,
"end": 9092
} | class ____:
def __init__(
self,
name,
bool_reduce=False,
extra_kwargs=tuple(),
numeric_only=False,
see_also_modules=("numpy", "dask.array"),
see_also_methods=(),
min_flox_version=None,
additional_notes="",
):
self.name = name
self.extra_kwargs = extra_kwargs
self.numeric_only = numeric_only
self.see_also_modules = see_also_modules
self.see_also_methods = see_also_methods
self.min_flox_version = min_flox_version
self.additional_notes = additional_notes
if bool_reduce:
self.array_method = f"array_{name}"
self.np_example_array = (
"""np.array([True, True, True, True, True, False], dtype=bool)"""
)
else:
self.array_method = name
self.np_example_array = """np.array([1, 2, 3, 0, 2, np.nan])"""
@dataclass
| Method |
python | walkccc__LeetCode | solutions/3491. Phone Number Prefix/3491.py | {
"start": 0,
"end": 172
} | class ____:
def phonePrefix(self, numbers: list[str]) -> bool:
return not any(b.startswith(a)
for a, b in itertools.pairwise(sorted(numbers)))
| Solution |
python | walkccc__LeetCode | solutions/52. N-Queens II/52.py | {
"start": 0,
"end": 532
} | class ____:
def totalNQueens(self, n: int) -> int:
ans = 0
cols = [False] * n
diag1 = [False] * (2 * n - 1)
diag2 = [False] * (2 * n - 1)
def dfs(i: int) -> None:
nonlocal ans
if i == n:
ans += 1
return
for j in range(n):
if cols[j] or diag1[i + j] or diag2[j - i + n - 1]:
continue
cols[j] = diag1[i + j] = diag2[j - i + n - 1] = True
dfs(i + 1)
cols[j] = diag1[i + j] = diag2[j - i + n - 1] = False
dfs(0)
return ans
| Solution |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_handler_registry/plugin_handler.py | {
"start": 367,
"end": 1300
} | class ____(ActionHandler):
group = ActionHandler.Group.OTHER
config_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "The configuration schema for Plugin Actions",
"type": "object",
"properties": {
"target_identifier": {
"type": ["string", "null"],
},
"target_display": {
"type": ["string", "null"],
},
"target_type": {
"type": ["integer", "null"],
"enum": [None],
},
},
}
data_schema = {}
@staticmethod
def get_config_transformer() -> ConfigTransformer | None:
return None
@staticmethod
def execute(
job: WorkflowEventData,
action: Action,
detector: Detector,
) -> None:
execute_via_group_type_registry(job, action, detector)
| PluginActionHandler |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 34810,
"end": 35670
} | class ____(set, AnsibleTaggedObject):
__slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
def __copy__(self):
return super()._copy_collection()
def copy(self):
return copy.copy(self)
def __init__(self, value=None, *args, **kwargs):
if type(value) is _AnsibleTagsMapping: # pylint: disable=unidiomatic-typecheck
super().__init__(*args, **kwargs)
else:
super().__init__(value, *args, **kwargs)
def __new__(cls, value=None, *args, **kwargs):
return super()._new(value, *args, **kwargs)
def __reduce_ex__(self, protocol: t.SupportsIndex) -> tuple:
return super()._reduce(super().__reduce_ex__(protocol))
def __str__(self) -> str:
return self._native_copy().__str__()
def __repr__(self) -> str:
return self._native_copy().__repr__()
| _AnsibleTaggedSet |
python | huggingface__transformers | src/transformers/models/chinese_clip/modeling_chinese_clip.py | {
"start": 19613,
"end": 20293
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# Copied from transformers.models.align.modeling_align.AlignTextLayer with Align->ChineseCLIP
| ChineseCLIPVisionMLP |
python | great-expectations__great_expectations | tests/expectations/test_dataclass_serializable_dot_dict_pattern.py | {
"start": 722,
"end": 809
} | class ____(Enum):
VALUE_X = "x"
VALUE_Y = "y"
VALUE_Z = "z"
@dataclass
| MyEnum |
python | tornadoweb__tornado | tornado/test/util_test.py | {
"start": 1043,
"end": 1244
} | class ____(Configurable):
@classmethod
def configurable_base(cls):
return TestConfigurable
@classmethod
def configurable_default(cls):
return TestConfig1
| TestConfigurable |
python | doocs__leetcode | solution/1400-1499/1466.Reorder Routes to Make All Paths Lead to the City Zero/Solution.py | {
"start": 0,
"end": 353
} | class ____:
def minReorder(self, n: int, connections: List[List[int]]) -> int:
def dfs(a: int, fa: int) -> int:
return sum(c + dfs(b, a) for b, c in g[a] if b != fa)
g = [[] for _ in range(n)]
for a, b in connections:
g[a].append((b, 1))
g[b].append((a, 0))
return dfs(0, -1)
| Solution |
python | realpython__materials | python-self-type/stack_string.py | {
"start": 25,
"end": 493
} | class ____:
def __init__(self) -> None:
self.items: list[Any] = []
def push(self, item: Any) -> "Stack":
self.items.append(item)
return self
def pop(self) -> Any:
if self.__bool__():
return self.items.pop()
else:
raise ValueError("Stack is empty")
def __bool__(self) -> bool:
return len(self.items) > 0
stack = Stack()
stack.push(1).push(2).push(3).pop()
print(stack.items)
| Stack |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py | {
"start": 29962,
"end": 33567
} | class ____(GoogleCloudBaseOperator):
"""
Lists previously requested builds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildListBuildsOperator`
:param location: The location of the project.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param page_size: Optional, number of results to return in the list.
:param filter_: Optional, the raw filter text to constrain the results.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("location", "project_id", "gcp_conn_id")
operator_extra_links = (CloudBuildListLink(),)
def __init__(
self,
*,
location: str = "global",
project_id: str = PROVIDE_PROJECT_ID,
page_size: int | None = None,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.page_size = page_size
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.location,
}
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
results = hook.list_builds(
project_id=self.project_id,
location=self.location,
page_size=self.page_size,
filter_=self.filter_,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildListLink.persist(
context=context,
project_id=project_id,
)
return [Build.to_dict(result) for result in results]
| CloudBuildListBuildsOperator |
python | tornadoweb__tornado | tornado/test/gen_test.py | {
"start": 31862,
"end": 33825
} | class ____(AsyncTestCase):
async def native_root(self, x):
ctx_var.set(x)
await self.inner(x)
@gen.coroutine
def gen_root(self, x):
ctx_var.set(x)
yield
yield self.inner(x)
async def inner(self, x):
self.assertEqual(ctx_var.get(), x)
await self.gen_inner(x)
self.assertEqual(ctx_var.get(), x)
# IOLoop.run_in_executor doesn't automatically copy context
ctx = contextvars.copy_context()
await self.io_loop.run_in_executor(None, lambda: ctx.run(self.thread_inner, x))
self.assertEqual(ctx_var.get(), x)
# Neither does asyncio's run_in_executor.
await asyncio.get_event_loop().run_in_executor(
None, lambda: ctx.run(self.thread_inner, x)
)
self.assertEqual(ctx_var.get(), x)
@gen.coroutine
def gen_inner(self, x):
self.assertEqual(ctx_var.get(), x)
yield
self.assertEqual(ctx_var.get(), x)
def thread_inner(self, x):
self.assertEqual(ctx_var.get(), x)
@gen_test
def test_propagate(self):
# Verify that context vars get propagated across various
# combinations of native and decorated coroutines.
yield [
self.native_root(1),
self.native_root(2),
self.gen_root(3),
self.gen_root(4),
]
@gen_test
def test_reset(self):
token = ctx_var.set(1)
yield
# reset asserts that we are still at the same level of the context tree,
# so we must make sure that we maintain that property across yield.
ctx_var.reset(token)
@gen_test
def test_propagate_to_first_yield_with_native_async_function(self):
x = 10
async def native_async_function():
self.assertEqual(ctx_var.get(), x)
ctx_var.set(x)
yield native_async_function()
if __name__ == "__main__":
unittest.main()
| ContextVarsTest |
python | google__pytype | pytype/directors/directors_test.py | {
"start": 2699,
"end": 3755
} | class ____(unittest.TestCase):
python_version = sys.version_info[:2]
@classmethod
def setUpClass(cls):
super().setUpClass()
# Invoking the _error_name decorator will register the name as a valid
# error name.
for name in ["test-error", "test-other-error"]:
errors._error_name(name)
def _create(self, src, disable=()):
self.num_lines = len(src.rstrip().splitlines())
self.src = textwrap.dedent(src)
src_tree = directors.parse_src(self.src, self.python_version)
self._errorlog = errors.VmErrorLog(test_utils.FakePrettyPrinter(), self.src)
self._director = directors.Director(
src_tree, self._errorlog, _TEST_FILENAME, disable
)
def _should_report(
self, expected, line, error_name="test-error", filename=_TEST_FILENAME
):
error = errors.Error.for_test(
errors.SEVERITY_ERROR,
"message",
error_name,
filename=filename,
line=line,
src=self.src,
)
self.assertEqual(expected, self._director.filter_error(error))
| DirectorTestCase |
python | walkccc__LeetCode | solutions/766. Toeplitz Matrix/766.py | {
"start": 0,
"end": 247
} | class ____:
def isToeplitzMatrix(self, matrix: list[list[int]]) -> bool:
for i in range(len(matrix) - 1):
for j in range(len(matrix[0]) - 1):
if matrix[i][j] != matrix[i + 1][j + 1]:
return False
return True
| Solution |
python | simplejson__simplejson | simplejson/tests/test_speedups.py | {
"start": 660,
"end": 740
} | class ____:
def __bool__(self):
1/0
__nonzero__ = __bool__
| BadBool |
python | python__mypy | mypy/patterns.py | {
"start": 1179,
"end": 1506
} | class ____(Pattern):
"""The pattern <pattern> | <pattern> | ..."""
patterns: list[Pattern]
def __init__(self, patterns: list[Pattern]) -> None:
super().__init__()
self.patterns = patterns
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_or_pattern(self)
| OrPattern |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 10723,
"end": 10853
} | class ____(desc_sig_element, _sig_element=True):
"""Node for an identifier in a signature."""
classes = ['n']
| desc_sig_name |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 6118,
"end": 6207
} | class ____(Exception):
"Table does not have a single obvious primary key"
| BadPrimaryKey |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 5158,
"end": 6418
} | class ____(SchedulerEvent):
"""
Signals that a scheduler has acquired a job for processing.
:param job_id: the ID of the job that was acquired
:param scheduler_id: the ID of the scheduler that acquired the job
:param task_id: ID of the task the job belongs to
:param schedule_id: ID of the schedule that
:param scheduled_start: the time the job was scheduled to start via a schedule (if
any)
"""
job_id: UUID = attrs.field(converter=as_uuid)
scheduler_id: str
task_id: str
schedule_id: str | None = None
scheduled_start: datetime | None = attrs.field(converter=as_aware_datetime)
@classmethod
def from_job(cls, job: Job, scheduler_id: str) -> JobAcquired:
"""
Create a new job-acquired event from a job and a scheduler ID.
:param job: the job that was acquired
:param scheduler_id: the ID of the scheduler that acquired the job
:return: a new job-acquired event
"""
return cls(
job_id=job.id,
scheduler_id=scheduler_id,
task_id=job.task_id,
schedule_id=job.schedule_id,
scheduled_start=job.scheduled_fire_time,
)
@attrs.define(kw_only=True, frozen=True)
| JobAcquired |
python | huggingface__transformers | src/transformers/models/gemma/modeling_gemma.py | {
"start": 22300,
"end": 22556
} | class ____(GenericForTokenClassification, GemmaPreTrainedModel):
pass
__all__ = [
"GemmaModel",
"GemmaForCausalLM",
"GemmaForSequenceClassification",
"GemmaForTokenClassification",
"GemmaPreTrainedModel",
]
| GemmaForTokenClassification |
python | pypa__pipenv | pipenv/vendor/dotenv/parser.py | {
"start": 1693,
"end": 5186
} | class ____:
def __init__(self, stream: IO[str]) -> None:
self.string = stream.read()
self.position = Position.start()
self.mark = Position.start()
def has_next(self) -> bool:
return self.position.chars < len(self.string)
def set_mark(self) -> None:
self.mark.set(self.position)
def get_marked(self) -> Original:
return Original(
string=self.string[self.mark.chars:self.position.chars],
line=self.mark.line,
)
def peek(self, count: int) -> str:
return self.string[self.position.chars:self.position.chars + count]
def read(self, count: int) -> str:
result = self.string[self.position.chars:self.position.chars + count]
if len(result) < count:
raise Error("read: End of string")
self.position.advance(result)
return result
def read_regex(self, regex: Pattern[str]) -> Sequence[str]:
match = regex.match(self.string, self.position.chars)
if match is None:
raise Error("read_regex: Pattern not found")
self.position.advance(self.string[match.start():match.end()])
return match.groups()
def decode_escapes(regex: Pattern[str], string: str) -> str:
def decode_match(match: Match[str]) -> str:
return codecs.decode(match.group(0), 'unicode-escape') # type: ignore
return regex.sub(decode_match, string)
def parse_key(reader: Reader) -> Optional[str]:
char = reader.peek(1)
if char == "#":
return None
elif char == "'":
(key,) = reader.read_regex(_single_quoted_key)
else:
(key,) = reader.read_regex(_unquoted_key)
return key
def parse_unquoted_value(reader: Reader) -> str:
(part,) = reader.read_regex(_unquoted_value)
return re.sub(r"\s+#.*", "", part).rstrip()
def parse_value(reader: Reader) -> str:
char = reader.peek(1)
if char == u"'":
(value,) = reader.read_regex(_single_quoted_value)
return decode_escapes(_single_quote_escapes, value)
elif char == u'"':
(value,) = reader.read_regex(_double_quoted_value)
return decode_escapes(_double_quote_escapes, value)
elif char in (u"", u"\n", u"\r"):
return u""
else:
return parse_unquoted_value(reader)
def parse_binding(reader: Reader) -> Binding:
reader.set_mark()
try:
reader.read_regex(_multiline_whitespace)
if not reader.has_next():
return Binding(
key=None,
value=None,
original=reader.get_marked(),
error=False,
)
reader.read_regex(_export)
key = parse_key(reader)
reader.read_regex(_whitespace)
if reader.peek(1) == "=":
reader.read_regex(_equal_sign)
value: Optional[str] = parse_value(reader)
else:
value = None
reader.read_regex(_comment)
reader.read_regex(_end_of_line)
return Binding(
key=key,
value=value,
original=reader.get_marked(),
error=False,
)
except Error:
reader.read_regex(_rest_of_line)
return Binding(
key=None,
value=None,
original=reader.get_marked(),
error=True,
)
def parse_stream(stream: IO[str]) -> Iterator[Binding]:
reader = Reader(stream)
while reader.has_next():
yield parse_binding(reader)
| Reader |
python | mlflow__mlflow | examples/llama_index/workflow/workflow/events.py | {
"start": 124,
"end": 247
} | class ____(Event):
"""Event for triggering VectorStore index retrieval step."""
query: str
| VectorSearchRetrieveEvent |
python | joke2k__faker | faker/providers/bank/fil_PH/__init__.py | {
"start": 51,
"end": 220
} | class ____(EnPhBankProvider):
"""Implement bank provider for ``fil_PH`` locale.
There is no difference from the ``en_PH`` implementation.
"""
pass
| Provider |
python | coleifer__peewee | tests/sql.py | {
"start": 91824,
"end": 93486
} | class ____(BaseTestCase):
def test_simple_index(self):
pidx = Index('person_name', Person, (Person.name,), unique=True)
self.assertSQL(pidx, (
'CREATE UNIQUE INDEX "person_name" ON "person" ("name")'), [])
pidx = pidx.where(Person.dob > datetime.date(1950, 1, 1))
self.assertSQL(pidx, (
'CREATE UNIQUE INDEX "person_name" ON "person" '
'("name") WHERE ("dob" > ?)'), [datetime.date(1950, 1, 1)])
def test_advanced_index(self):
Article = Table('article')
aidx = Index('foo_idx', Article, (
Article.c.status,
Article.c.timestamp.desc(),
fn.SUBSTR(Article.c.title, 1, 1)), safe=True)
self.assertSQL(aidx, (
'CREATE INDEX IF NOT EXISTS "foo_idx" ON "article" '
'("status", "timestamp" DESC, SUBSTR("title", ?, ?))'), [1, 1])
aidx = aidx.where(Article.c.flags.bin_and(4) == 4)
self.assertSQL(aidx, (
'CREATE INDEX IF NOT EXISTS "foo_idx" ON "article" '
'("status", "timestamp" DESC, SUBSTR("title", ?, ?)) '
'WHERE (("flags" & ?) = ?)'), [1, 1, 4, 4])
# Check behavior when value-literals are enabled.
self.assertSQL(aidx, (
'CREATE INDEX IF NOT EXISTS "foo_idx" ON "article" '
'("status", "timestamp" DESC, SUBSTR("title", 1, 1)) '
'WHERE (("flags" & 4) = 4)'), [], value_literals=True)
def test_str_cols(self):
uidx = Index('users_info', User, ('username DESC', 'id'))
self.assertSQL(uidx, (
'CREATE INDEX "users_info" ON "users" (username DESC, id)'), [])
| TestIndex |
python | getsentry__sentry | tests/sentry/notifications/platform/msteams/test_provider.py | {
"start": 8163,
"end": 8815
} | class ____(TestCase):
def test_basic_fields(self) -> None:
provider = MSTeamsNotificationProvider()
assert provider.key == NotificationProviderKey.MSTEAMS
assert provider.target_class == IntegrationNotificationTarget
assert provider.target_resource_types == [
NotificationTargetResourceType.CHANNEL,
NotificationTargetResourceType.DIRECT_MESSAGE,
]
def test_is_available(self) -> None:
assert MSTeamsNotificationProvider.is_available() is False
assert MSTeamsNotificationProvider.is_available(organization=self.organization) is False
| MSTeamsNotificationProviderTest |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/data.py | {
"start": 11661,
"end": 12190
} | class ____(SpanProperty):
def __init__(self, spans: "Spans") -> None:
super().__init__(spans)
self.starts = IntList.of_length(len(self.spans))
self.ends = IntList.of_length(len(self.spans))
def start_span(self, i: int, label_index: int) -> None:
self.starts[i] = self.choice_count
def stop_span(self, i: int, *, discarded: bool) -> None:
self.ends[i] = self.choice_count
def finish(self) -> tuple[IntList, IntList]:
return (self.starts, self.ends)
| _starts_and_ends |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/data_loss_prevention.py | {
"start": 2431,
"end": 2719
} | class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Data Loss Prevention link."""
name = "Cloud DLP Deidentify Templates List"
key = "cloud_dlp_deidentify_templates_list_key"
format_str = DLP_DEIDENTIFY_TEMPLATES_LIST_LINK
| CloudDLPDeidentifyTemplatesListLink |
python | doocs__leetcode | solution/0400-0499/0460.LFU Cache/Solution.py | {
"start": 0,
"end": 190
} | class ____:
def __init__(self, key: int, value: int) -> None:
self.key = key
self.value = value
self.freq = 1
self.prev = None
self.next = None
| Node |
python | openai__openai-python | src/openai/resources/beta/realtime/realtime.py | {
"start": 43143,
"end": 43676
} | class ____(BaseAsyncRealtimeConnectionResource):
async def update(
self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
) -> None:
"""Send this event to update a transcription session."""
await self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "transcription_session.update", "session": session, "event_id": event_id}),
)
)
| AsyncRealtimeTranscriptionSessionResource |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 218666,
"end": 219152
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeletePullRequestReviewComment"""
__schema__ = github_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The ID of the comment to delete."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeletePullRequestReviewCommentInput |
python | doocs__leetcode | solution/2400-2499/2403.Minimum Time to Kill All Monsters/Solution.py | {
"start": 0,
"end": 475
} | class ____:
def minimumTime(self, power: List[int]) -> int:
@cache
def dfs(mask: int) -> int:
if mask == 0:
return 0
ans = inf
gain = 1 + (n - mask.bit_count())
for i, x in enumerate(power):
if mask >> i & 1:
ans = min(ans, dfs(mask ^ (1 << i)) + (x + gain - 1) // gain)
return ans
n = len(power)
return dfs((1 << n) - 1)
| Solution |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 1091,
"end": 1236
} | class ____(Ambiguous):
""" Inherits from something ambiguous.
This could emit a warning when we will have
flow detection.
"""
| Good6 |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 6319,
"end": 6592
} | class ____():
def __getattribute__(self, name):
return getattr(Base(), name)
#? int()
Wrapper(Base()).ret(3)
#? ['ret']
Wrapper(Base()).ret
#? int()
Wrapper(Wrapper(Base())).ret(3)
#? ['ret']
Wrapper(Wrapper(Base())).ret
#? int()
Wrapper2(Base()).ret(3)
| Wrapper2 |
python | getsentry__sentry | tests/sentry/snuba/test_entity_subscriptions.py | {
"start": 21377,
"end": 23825
} | class ____(TestCase):
def test(self) -> None:
cases = [
(EventsEntitySubscription, SnubaQuery.Type.ERROR, Dataset.Events, "count()"),
(
PerformanceTransactionsEntitySubscription,
SnubaQuery.Type.PERFORMANCE,
Dataset.Transactions,
"count()",
),
(
PerformanceMetricsEntitySubscription,
SnubaQuery.Type.PERFORMANCE,
Dataset.Metrics,
"count()",
),
(
PerformanceMetricsEntitySubscription,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"count()",
),
(
PerformanceMetricsEntitySubscription,
SnubaQuery.Type.PERFORMANCE,
Dataset.Metrics,
"count_unique(user)",
),
(
PerformanceMetricsEntitySubscription,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"count_unique(user)",
),
(
PerformanceMetricsEntitySubscription,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"max(d:transactions/sentry.process_profile.track_outcome@second)",
),
(
MetricsCountersEntitySubscription,
SnubaQuery.Type.CRASH_RATE,
Dataset.Metrics,
"percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
),
(
MetricsSetsEntitySubscription,
SnubaQuery.Type.CRASH_RATE,
Dataset.Metrics,
"percentage(users_crashed, users) AS _crash_rate_alert_aggregate",
),
]
for expected_entity_subscription, query_type, dataset, aggregate in cases:
snuba_query = SnubaQuery.objects.create(
time_window=60,
type=query_type.value,
dataset=dataset.value,
aggregate=aggregate,
resolution=5,
)
assert isinstance(
get_entity_subscription_from_snuba_query(snuba_query, self.organization.id),
expected_entity_subscription,
)
| GetEntitySubscriptionFromSnubaQueryTest |
python | pandas-dev__pandas | pandas/io/parquet.py | {
"start": 4496,
"end": 4919
} | class ____:
@staticmethod
def validate_dataframe(df: DataFrame) -> None:
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
def write(self, df: DataFrame, path, compression, **kwargs) -> None:
raise AbstractMethodError(self)
def read(self, path, columns=None, **kwargs) -> DataFrame:
raise AbstractMethodError(self)
| BaseImpl |
python | keras-team__keras | keras/src/layers/regularization/gaussian_dropout_test.py | {
"start": 125,
"end": 1125
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_dropout_basics(self):
self.run_layer_test(
layers.GaussianDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_gaussian_dropout_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)),
np.sqrt(0.3 / (1 - 0.3)),
atol=0.02,
)
| GaussianDropoutTest |
python | astropy__astropy | astropy/coordinates/angles/core.py | {
"start": 19903,
"end": 24340
} | class ____(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude) or (
isinstance(angle, str) and angle.endswith(("E", "W"))
):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself.
"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
# For speed, compare using "is", which is not strictly guaranteed to hold,
# but if it doesn't we'll just convert correctly in the 'else' clause.
if angles.unit is u.deg:
limit = 90
elif angles.unit is u.rad:
limit = 0.5 * np.pi
else:
limit = u.degree.to(angles.unit, 90.0)
angles_view = angles.view(np.ndarray)
if NUMPY_LT_2_0:
# Ensure ndim>=1 so that comparison is done using the angle dtype.
# Otherwise, e.g., np.array(np.pi/2, 'f4') > np.pi/2 will yield True.
angles_view = angles_view[np.newaxis]
if np.any(np.abs(angles_view) > limit):
if np.size(angles) < 5:
raise ValueError(
"Latitude angle(s) must be within -90 deg <= angle "
f"<= 90 deg, got {angles.to(u.degree)}"
)
else:
raise ValueError(
"Latitude angle(s) must be within -90 deg <= angle "
f"<= 90 deg, got {angles.min().to(u.degree)} <= "
f"angle <= {angles.max().to(u.degree)}"
)
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
| Latitude |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 25145,
"end": 25787
} | class ____(BaseOxmlElement):
"""
``<w:parent>`` element, an invented element for use in testing.
"""
eg_zooChoice = ZeroOrOneChoice(
(Choice("w:choice"), Choice("w:choice2")),
successors=("w:oomChild", "w:oooChild"),
)
oomChild = OneOrMore("w:oomChild", successors=("w:oooChild", "w:zomChild", "w:zooChild"))
oooChild = OneAndOnlyOne("w:oooChild")
zomChild = ZeroOrMore("w:zomChild", successors=("w:zooChild",))
zooChild = ZeroOrOne("w:zooChild", successors=())
optAttr = OptionalAttribute("w:optAttr", ST_IntegerType)
reqAttr = RequiredAttribute("reqAttr", ST_IntegerType)
| CT_Parent |
python | huggingface__transformers | src/transformers/models/efficientnet/modeling_efficientnet.py | {
"start": 8725,
"end": 11764
} | class ____(nn.Module):
r"""
This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.
Args:
config ([`EfficientNetConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
operation, set to `True` for inputs with odd input sizes.
"""
def __init__(
self,
config: EfficientNetConfig,
in_dim: int,
out_dim: int,
stride: int,
expand_ratio: int,
kernel_size: int,
drop_rate: float,
id_skip: bool,
adjust_padding: bool,
):
super().__init__()
self.expand_ratio = expand_ratio
self.expand = self.expand_ratio != 1
expand_in_dim = in_dim * expand_ratio
if self.expand:
self.expansion = EfficientNetExpansionLayer(
config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
)
self.depthwise_conv = EfficientNetDepthwiseLayer(
config=config,
in_dim=expand_in_dim if self.expand else in_dim,
stride=stride,
kernel_size=kernel_size,
adjust_padding=adjust_padding,
)
self.squeeze_excite = EfficientNetSqueezeExciteLayer(
config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
)
self.projection = EfficientNetFinalBlockLayer(
config=config,
in_dim=expand_in_dim if self.expand else in_dim,
out_dim=out_dim,
stride=stride,
drop_rate=drop_rate,
id_skip=id_skip,
)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
embeddings = hidden_states
# Expansion and depthwise convolution phase
if self.expand_ratio != 1:
hidden_states = self.expansion(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
# Squeeze and excite phase
hidden_states = self.squeeze_excite(hidden_states)
hidden_states = self.projection(embeddings, hidden_states)
return hidden_states
| EfficientNetBlock |
python | tqdm__tqdm | benchmarks/benchmarks.py | {
"start": 138,
"end": 2381
} | class ____:
"""Running time of wrapped empty loops"""
def __init__(self, length):
try:
from time import process_time
self.time = process_time
except ImportError:
from time import clock
self.time = clock
self.iterable = range(int(length))
def run(self, cls):
pbar = cls(self.iterable)
t0 = self.time()
[0 for _ in pbar] # pylint: disable=pointless-statement
t1 = self.time()
return t1 - t0
def run_by_name(self, method):
return getattr(self, method.replace("-", "_"))()
def no_progress(self):
return self.run(lambda x: x)
def tqdm_optimised(self):
from tqdm import tqdm
return self.run(partial(tqdm, miniters=6e5, smoothing=0))
def tqdm(self):
from tqdm import tqdm
return self.run(tqdm)
def alive_progress(self):
from alive_progress import alive_bar
class wrapper:
def __init__(self, iterable):
self.iterable = iterable
def __iter__(self):
iterable = self.iterable
with alive_bar(len(iterable)) as bar:
for i in iterable:
yield i
bar()
return self.run(wrapper)
# def progressbar(self):
# from progressbar.progressbar import ProgressBar
# return self.run(ProgressBar())
def progressbar2(self):
from progressbar import progressbar
return self.run(progressbar)
def rich(self):
from rich.progress import track
return self.run(track)
# thorough test against no-progress
slow = Comparison(6e6)
def track_tqdm(method):
return slow.run_by_name(method)
track_tqdm.params = ["tqdm", "tqdm-optimised", "no-progress"]
track_tqdm.param_names = ["method"]
track_tqdm.unit = "Seconds (lower is better)"
# quick test against alternatives
fast = Comparison(1e5)
def track_alternatives(library):
return fast.run_by_name(library)
track_alternatives.params = ["rich", "progressbar2", "alive-progress", "tqdm"]
track_alternatives.param_names = ["library"]
track_alternatives.unit = "Seconds (lower is better)"
| Comparison |
python | imageio__imageio | imageio/plugins/freeimage.py | {
"start": 13621,
"end": 14645
} | class ____(FreeimageFormat):
"""A PNM format based on the Freeimage library.
This format supports single bit (PBM), grayscale (PGM) and RGB (PPM)
images, even with ASCII or binary coding.
The freeimage plugin requires a `freeimage` binary. If this binary
not available on the system, it can be downloaded manually from
<https://github.com/imageio/imageio-binaries> by either
- the command line script ``imageio_download_bin freeimage``
- the Python method ``imageio.plugins.freeimage.download()``
Parameters for saving
---------------------
use_ascii : bool
Save with ASCII coding. Default True.
"""
class Writer(FreeimageFormat.Writer):
def _open(self, flags=0, use_ascii=True):
# Build flags from kwargs
flags = int(flags)
if use_ascii:
flags |= IO_FLAGS.PNM_SAVE_ASCII
# Act as usual, but with modified flags
return FreeimageFormat.Writer._open(self, flags)
| FreeimagePnmFormat |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 9660,
"end": 10569
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent,)
name = "LogsCapturedEvent"
fileKey = graphene.NonNull(graphene.String)
stepKeys = graphene.List(graphene.NonNull(graphene.String))
externalUrl = graphene.String()
externalStdoutUrl = graphene.String()
externalStderrUrl = graphene.String()
shellCmd = graphene.Field(GrapheneLogRetrievalShellCommand)
pid = graphene.Int()
# legacy name for compute log file key... required for back-compat reasons, but has been
# renamed to fileKey for newer versions of the Dagster UI
logKey = graphene.NonNull(graphene.String)
def _construct_asset_event_metadata_params(event, metadata):
metadata_params = {"label": metadata.label, "description": metadata.description}
metadata_params.update(construct_basic_params(event))
return metadata_params
| GrapheneLogsCapturedEvent |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/utils/tests/test_iofuncs.py | {
"start": 1487,
"end": 1733
} | class ____:
"""A custom class of objects for testing."""
def __init__(self, data):
self.data = None
if data:
self.data = data
def __eq__(self, other):
return self.__dict__ == other.__dict__
| CustomObj |
python | Textualize__textual | tests/test_modal.py | {
"start": 1083,
"end": 2563
} | class ____(App):
"""An app with a modal dialog."""
BINDINGS = [("q", "request_quit", "Quit")]
CSS = """
QuitScreen {
align: center middle;
}
#dialog {
grid-size: 2;
grid-gutter: 1 2;
grid-rows: 1fr 3;
padding: 0 1;
width: 60;
height: 11;
border: thick $background 80%;
background: $surface;
}
#question {
column-span: 2;
height: 1fr;
width: 1fr;
content-align: center middle;
}
Button {
width: 100%;
}
"""
def compose(self) -> ComposeResult:
yield Header()
yield Label(TEXT)
yield Footer()
def action_request_quit(self) -> None:
"""Action to display the quit dialog."""
def check_quit(quit: bool | None) -> None:
"""Called when QuitScreen is dismissed."""
if quit:
self.exit()
self.push_screen(QuitScreen(), check_quit)
async def test_modal_pop_screen():
# https://github.com/Textualize/textual/issues/4656
app = ModalApp()
async with app.run_test() as pilot:
# Pause to ensure the footer is fully composed to avoid flakiness in CI
await pilot.pause()
await pilot.pause() # Required in Windows
assert await pilot.click("FooterKey")
assert await app.wait_for_refresh()
assert isinstance(app.screen, QuitScreen)
# Check activating the quit button exits the app
await pilot.press("enter")
assert pilot.app._exit
| ModalApp |
python | openai__openai-python | src/openai/types/evals/create_eval_completions_run_data_source.py | {
"start": 5304,
"end": 7503
} | class ____(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
reasoning_effort: Optional[ReasoningEffort] = None
"""
Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
"""
response_format: Optional[SamplingParamsResponseFormat] = None
"""An object specifying the format that the model must output.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
"""
seed: Optional[int] = None
"""A seed value to initialize the randomness, during sampling."""
temperature: Optional[float] = None
"""A higher temperature increases randomness in the outputs."""
tools: Optional[List[ChatCompletionFunctionTool]] = None
"""A list of tools the model may call.
Currently, only functions are supported as a tool. Use this to provide a list of
functions the model may generate JSON inputs for. A max of 128 functions are
supported.
"""
top_p: Optional[float] = None
"""An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
| SamplingParams |
python | bokeh__bokeh | src/bokeh/themes/theme.py | {
"start": 2103,
"end": 8938
} | class ____:
''' Provide new default values for Bokeh models.
Bokeh Model properties all have some built-in default value. If a property
has not been explicitly set (e.g. ``m.foo = 10``), accessing the
property will return the default value. It may be useful for users to be
able to specify a different set of default values than the built-in
default. The ``Theme`` class allows collections of custom default values
to be easily applied to Bokeh documents.
The ``Theme`` class can be constructed either from a YAML file or from a
JSON dict (but not both). Examples of both formats are shown below.
The plotting API defaults override some theme properties. Namely:
`fill_alpha`, `fill_color`, `line_alpha`, `line_color`, `text_alpha` and
`text_color`. Those properties should therefore be set explicitly when
using the plotting API.
Args:
filename (str, optional) : path to a YAML theme file
json (str, optional) : a JSON dictionary specifying theme values
Raises:
ValueError
If neither ``filename`` nor ``json`` is supplied.
Examples:
Themes are specified by providing a top-level key ``attrs`` which
has blocks for Model types to be themed. Each block has keys and
values that specify the new property defaults for that type.
Take note of the fact that YAML interprets the value `None` as
a string, which is not usually what you want. To give `None` as a
value in YAML, use `!!null`. To give 'None' as a value in json,
use `null`.
Here is an example theme in YAML format that sets various visual
properties for all figures, grids, and titles:
.. code-block:: yaml
attrs:
Plot:
background_fill_color: '#2F2F2F'
border_fill_color: '#2F2F2F'
outline_line_color: '#444444'
Axis:
axis_line_color: !!null
Grid:
grid_line_dash: [6, 4]
grid_line_alpha: .3
Title:
text_color: "white"
Here is the same theme, in JSON format:
.. code-block:: python
{
'attrs' : {
'Plot': {
'background_fill_color': '#2F2F2F',
'border_fill_color': '#2F2F2F',
'outline_line_color': '#444444',
},
'Axis': {
'axis_line_color': None,
},
'Grid': {
'grid_line_dash': [6, 4],
'grid_line_alpha': .3,
},
'Title': {
'text_color': 'white'
}
}
}
'''
_by_class_cache: dict[str, dict[str, Any]]
_line_defaults: dict[str, Any]
_fill_defaults: dict[str, Any]
_text_defaults: dict[str, Any]
_json: dict[str, Any]
@overload
def __init__(self, filename: PathLike) -> None: ...
@overload
def __init__(self, json: dict[str, Any]) -> None: ...
def __init__(self, filename: PathLike | None = None, json: dict[str, Any] | None = None) -> None:
if (filename is not None) and (json is not None):
raise ValueError("Theme should be constructed from a file or from json not both")
if filename is not None:
with open(filename, "rb") as f:
json = yaml.safe_load(f)
# empty docs result in None rather than {}, fix it.
if json is None:
json = {}
if json is None:
raise ValueError("Theme requires json or a filename to construct")
self._json = json
if 'attrs' not in self._json:
self._json['attrs'] = {}
if not isinstance(self._json['attrs'], dict):
raise ValueError(f"theme problem: attrs field should be a dictionary of class names, not {self._json['attrs']!r}")
for key, value in self._json['attrs'].items():
if not isinstance(value, dict):
raise ValueError(f"theme problem: attrs.{key} should be a dictionary of properties, not {value!r}")
self._line_defaults = self._json.get('line_defaults', _empty_dict)
self._fill_defaults = self._json.get('fill_defaults', _empty_dict)
self._text_defaults = self._json.get('text_defaults', _empty_dict)
# mapping from class name to the full set of properties
# (including those merged in from base classes) for that
# class.
self._by_class_cache = {}
def _add_glyph_defaults(self, cls: type[HasProps], props: dict[str, Any]) -> None:
from ..models.glyph import Glyph
if issubclass(cls, Glyph):
if hasattr(cls, "line_alpha"):
props.update(self._line_defaults)
if hasattr(cls, "fill_alpha"):
props.update(self._fill_defaults)
if hasattr(cls, "text_alpha"):
props.update(self._text_defaults)
def _for_class(self, cls: type[Model]) -> dict[str, Any]:
if cls.__name__ not in self._by_class_cache:
attrs = self._json['attrs']
combined: dict[str, Any] = {}
# we go in reverse order so that subclass props override base class
for base in cls.__mro__[-2::-1]:
if not issubclass(base, HasProps):
continue
self._add_glyph_defaults(base, combined)
combined.update(attrs.get(base.__name__, _empty_dict))
if len(combined) == 0:
combined = _empty_dict
self._by_class_cache[cls.__name__] = combined
return self._by_class_cache[cls.__name__]
def apply_to_model(self, model: Model) -> None:
''' Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the |Document| the model is a part of.
'''
model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------
| Theme |
python | django-extensions__django-extensions | django_extensions/db/fields/json.py | {
"start": 555,
"end": 770
} | class ____(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
| JSONDict |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_adx.py | {
"start": 2058,
"end": 3943
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE, "provide_context": True}
self.dag = DAG(TEST_DAG_ID + "test_schedule_dag_once", default_args=args, schedule="@once")
self.operator = AzureDataExplorerQueryOperator(dag=self.dag, **MOCK_DATA)
def test_init(self):
assert self.operator.task_id == MOCK_DATA["task_id"]
assert self.operator.query == MOCK_DATA["query"]
assert self.operator.database == MOCK_DATA["database"]
assert self.operator.azure_data_explorer_conn_id == "azure_data_explorer_default"
@mock.patch.object(AzureDataExplorerHook, "run_query", return_value=MockResponse())
@mock.patch.object(AzureDataExplorerHook, "get_conn")
def test_run_query(self, mock_conn, mock_run_query):
self.operator.execute(None)
mock_run_query.assert_called_once_with(
MOCK_DATA["query"], MOCK_DATA["database"], MOCK_DATA["options"]
)
@mock.patch.object(AzureDataExplorerHook, "run_query", return_value=MockResponse())
@mock.patch.object(AzureDataExplorerHook, "get_conn")
def test_azure_data_explorer_query_operator_xcom_push_and_pull(
mock_conn,
mock_run_query,
create_task_instance_of_operator,
request,
):
if AIRFLOW_V_3_0_PLUS:
run_task = request.getfixturevalue("run_task")
task = AzureDataExplorerQueryOperator(**MOCK_DATA)
run_task(task=task)
assert run_task.xcom.get(key="return_value", task_id=task.task_id) == str(MOCK_RESULT)
else:
ti = create_task_instance_of_operator(
AzureDataExplorerQueryOperator,
dag_id="test_azure_data_explorer_query_operator_xcom_push_and_pull",
**MOCK_DATA,
)
ti.run()
assert ti.xcom_pull(task_ids=MOCK_DATA["task_id"]) == str(MOCK_RESULT)
| TestAzureDataExplorerQueryOperator |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/tests/test_build_image/test_steps/test_common.py | {
"start": 342,
"end": 5812
} | class ____:
@pytest.fixture(scope="class")
def faker_connector(self, all_connectors):
for connector in all_connectors:
if connector.technical_name == "source-faker":
return connector
pytest.fail("Could not find the source-faker connector.")
@pytest.fixture
def test_context(self, mocker, dagger_client, faker_connector, tmp_path):
return mocker.Mock(
secrets_to_mask=[],
dagger_client=dagger_client,
connector=faker_connector,
host_image_export_dir_path=tmp_path,
git_revision="test-revision",
diffed_branch="test-branch",
git_repo_url="test-repo-url",
)
@pytest.fixture
def bad_docker_host(self):
original_docker_host = os.environ.get("DOCKER_HOST")
yield "tcp://localhost:9999"
if original_docker_host:
os.environ["DOCKER_HOST"] = original_docker_host
else:
del os.environ["DOCKER_HOST"]
@pytest.mark.parametrize(
"platforms",
[
[dagger.Platform("linux/arm64")],
[dagger.Platform("linux/amd64")],
[dagger.Platform("linux/amd64"), dagger.Platform("linux/arm64")],
],
)
async def test_run(self, dagger_client, test_context, platforms):
"""Test that the step runs successfully and that the image is loaded in the local docker host."""
built_containers = {
platform: dagger_client.container(platform=platform).from_(f'{test_context.connector.metadata["dockerRepository"]}:latest')
for platform in platforms
}
step = common.LoadContainerToLocalDockerHost(test_context)
assert step.image_tag == "dev"
docker_client = docker.from_env()
step.image_tag = "test-load-container"
for platform in platforms:
full_image_name = f"{test_context.connector.metadata['dockerRepository']}:{step.image_tag}-{platform.replace('/', '-')}"
try:
docker_client.images.remove(full_image_name, force=True)
except docker.errors.ImageNotFound:
pass
result = await step.run(built_containers)
assert result.status is StepStatus.SUCCESS
multi_platforms = len(platforms) > 1
for platform in platforms:
if multi_platforms:
full_image_name = f"{test_context.connector.metadata['dockerRepository']}:{step.image_tag}-{platform.replace('/', '-')}"
else:
full_image_name = f"{test_context.connector.metadata['dockerRepository']}:{step.image_tag}"
docker_client.images.get(full_image_name)
# CI can't run docker arm64 containers
if platform is LOCAL_BUILD_PLATFORM or (os.environ.get("CI", "false").lower() != "true"):
docker_client.containers.run(full_image_name, "spec")
docker_client.images.remove(full_image_name, force=True)
async def test_run_export_failure(self, dagger_client, test_context, mocker):
"""Test that the step fails if the export of the container fails."""
built_containers = {
LOCAL_BUILD_PLATFORM: dagger_client.container(platform=LOCAL_BUILD_PLATFORM).from_(
f'{test_context.connector.metadata["dockerRepository"]}:latest'
)
}
step = common.LoadContainerToLocalDockerHost(test_context)
mocker.patch.object(common, "export_container_to_tarball", return_value=(None, None))
result = await step.run(built_containers)
assert result.status is StepStatus.FAILURE
assert "Failed to export the connector image" in result.stderr
async def test_run_connection_error(self, dagger_client, test_context, bad_docker_host):
"""Test that the step fails if the connection to the docker host fails."""
built_containers = {
LOCAL_BUILD_PLATFORM: dagger_client.container(platform=LOCAL_BUILD_PLATFORM).from_(
f'{test_context.connector.metadata["dockerRepository"]}:latest'
)
}
step = common.LoadContainerToLocalDockerHost(test_context)
os.environ["DOCKER_HOST"] = bad_docker_host
result = await step.run(built_containers)
assert result.status is StepStatus.FAILURE
assert "Something went wrong while interacting with the local docker client" in result.stderr
async def test_run_import_failure(self, dagger_client, test_context, mocker):
"""Test that the step fails if the docker import of the tar fails."""
built_containers = {
LOCAL_BUILD_PLATFORM: dagger_client.container(platform=LOCAL_BUILD_PLATFORM).from_(
f'{test_context.connector.metadata["dockerRepository"]}:latest'
)
}
step = common.LoadContainerToLocalDockerHost(test_context)
mock_docker_client = mocker.MagicMock()
mock_docker_client.api.import_image_from_file.return_value = "bad response"
mock_docker_client.images.load.side_effect = docker.errors.DockerException("test error")
mocker.patch.object(common.docker, "from_env", return_value=mock_docker_client)
result = await step.run(built_containers)
assert result.status is StepStatus.FAILURE
assert "Something went wrong while interacting with the local docker client: test error" in result.stderr
| TestLoadContainerToLocalDockerHost |
python | imageio__imageio | imageio/plugins/_dicom.py | {
"start": 22347,
"end": 34072
} | class ____(object):
"""DicomSeries
This class represents a serie of dicom files (SimpleDicomReader
objects) that belong together. If these are multiple files, they
represent the slices of a volume (like for CT or MRI).
"""
def __init__(self, suid, progressIndicator):
# Init dataset list and the callback
self._entries = []
# Init props
self._suid = suid
self._info = {}
self._progressIndicator = progressIndicator
def __len__(self):
return len(self._entries)
def __iter__(self):
return iter(self._entries)
def __getitem__(self, index):
return self._entries[index]
@property
def suid(self):
return self._suid
@property
def shape(self):
"""The shape of the data (nz, ny, nx)."""
return self._info["shape"]
@property
def sampling(self):
"""The sampling (voxel distances) of the data (dz, dy, dx)."""
return self._info["sampling"]
@property
def info(self):
"""A dictionary containing the information as present in the
first dicomfile of this serie. None if there are no entries."""
return self._info
@property
def description(self):
"""A description of the dicom series. Used fields are
PatientName, shape of the data, SeriesDescription, and
ImageComments.
"""
info = self.info
# If no info available, return simple description
if not info: # pragma: no cover
return "DicomSeries containing %i images" % len(self)
fields = []
# Give patient name
if "PatientName" in info:
fields.append("" + info["PatientName"])
# Also add dimensions
if self.shape:
tmp = [str(d) for d in self.shape]
fields.append("x".join(tmp))
# Try adding more fields
if "SeriesDescription" in info:
fields.append("'" + info["SeriesDescription"] + "'")
if "ImageComments" in info:
fields.append("'" + info["ImageComments"] + "'")
# Combine
return " ".join(fields)
def __repr__(self):
adr = hex(id(self)).upper()
return "<DicomSeries with %i images at %s>" % (len(self), adr)
def get_numpy_array(self):
"""Get (load) the data that this DicomSeries represents, and return
it as a numpy array. If this serie contains multiple images, the
resulting array is 3D, otherwise it's 2D.
"""
# It's easy if no file or if just a single file
if len(self) == 0:
raise ValueError("Serie does not contain any files.")
elif len(self) == 1:
return self[0].get_numpy_array()
# Check info
if self.info is None:
raise RuntimeError("Cannot return volume if series not finished.")
# Init data (using what the dicom packaged produces as a reference)
slice = self[0].get_numpy_array()
vol = np.zeros(self.shape, dtype=slice.dtype)
vol[0] = slice
# Fill volume
self._progressIndicator.start("loading data", "", len(self))
for z in range(1, len(self)):
vol[z] = self[z].get_numpy_array()
self._progressIndicator.set_progress(z + 1)
self._progressIndicator.finish()
# Done
import gc
gc.collect()
return vol
def _append(self, dcm):
self._entries.append(dcm)
def _sort(self):
self._entries.sort(
key=lambda k: (
k.InstanceNumber,
(
k.ImagePositionPatient[2]
if hasattr(k, "ImagePositionPatient")
else None
),
)
)
def _finish(self):
"""
Evaluate the series of dicom files. Together they should make up
a volumetric dataset. This means the files should meet certain
conditions. Also some additional information has to be calculated,
such as the distance between the slices. This method sets the
attributes for "shape", "sampling" and "info".
This method checks:
* that there are no missing files
* that the dimensions of all images match
* that the pixel spacing of all images match
"""
# The datasets list should be sorted by instance number
L = self._entries
if len(L) == 0:
return
elif len(L) == 1:
self._info = L[0].info
return
# Get previous
ds1 = L[0]
# Init measures to calculate average of
distance_sum = 0.0
# Init measures to check (these are in 2D)
dimensions = ds1.Rows, ds1.Columns
# sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1])
sampling = ds1.info["sampling"][:2] # row, column
for index in range(len(L)):
# The first round ds1 and ds2 will be the same, for the
# distance calculation this does not matter
# Get current
ds2 = L[index]
# Get positions
pos1 = float(ds1.ImagePositionPatient[2])
pos2 = float(ds2.ImagePositionPatient[2])
# Update distance_sum to calculate distance later
distance_sum += abs(pos1 - pos2)
# Test measures
dimensions2 = ds2.Rows, ds2.Columns
# sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1])
sampling2 = ds2.info["sampling"][:2] # row, column
if dimensions != dimensions2:
# We cannot produce a volume if the dimensions match
raise ValueError("Dimensions of slices does not match.")
if sampling != sampling2:
# We can still produce a volume, but we should notify the user
self._progressIndicator.write("Warn: sampling does not match.")
# Store previous
ds1 = ds2
# Finish calculating average distance
# (Note that there are len(L)-1 distances)
distance_mean = distance_sum / (len(L) - 1)
# Set info dict
self._info = L[0].info.copy()
# Store information that is specific for the serie
self._info["shape"] = (len(L),) + ds2.info["shape"]
self._info["sampling"] = (distance_mean,) + ds2.info["sampling"]
def list_files(files, path):
"""List all files in the directory, recursively."""
for item in os.listdir(path):
item = os.path.join(path, item)
if os.path.isdir(item):
list_files(files, item)
elif os.path.isfile(item):
files.append(item)
def process_directory(request, progressIndicator, readPixelData=False):
"""
Reads dicom files and returns a list of DicomSeries objects, which
contain information about the data, and can be used to load the
image or volume data.
if readPixelData is True, the pixel data of all series is read. By
default the loading of pixeldata is deferred until it is requested
using the DicomSeries.get_pixel_array() method. In general, both
methods should be equally fast.
"""
# Get directory to examine
if os.path.isdir(request.filename):
path = request.filename
elif os.path.isfile(request.filename):
path = os.path.dirname(request.filename)
else: # pragma: no cover - tested earlier
raise ValueError("Dicom plugin needs a valid filename to examine the directory")
# Check files
files = []
list_files(files, path) # Find files recursively
# Gather file data and put in DicomSeries
series = {}
count = 0
progressIndicator.start("examining files", "files", len(files))
for filename in files:
# Show progress (note that we always start with a 0.0)
count += 1
progressIndicator.set_progress(count)
# Skip DICOMDIR files
if filename.count("DICOMDIR"): # pragma: no cover
continue
# Try loading dicom ...
try:
dcm = SimpleDicomReader(filename)
except NotADicomFile:
continue # skip non-dicom file
except Exception as why: # pragma: no cover
progressIndicator.write(str(why))
continue
# Get SUID and register the file with an existing or new series object
try:
suid = dcm.SeriesInstanceUID
except AttributeError: # pragma: no cover
continue # some other kind of dicom file
if suid not in series:
series[suid] = DicomSeries(suid, progressIndicator)
series[suid]._append(dcm)
# Finish progress
# progressIndicator.finish('Found %i series.' % len(series))
# Make a list and sort, so that the order is deterministic
series = list(series.values())
series.sort(key=lambda x: x.suid)
# Split series if necessary
for serie in reversed([serie for serie in series]):
splitSerieIfRequired(serie, series, progressIndicator)
# Finish all series
# progressIndicator.start('analyse series', '', len(series))
series_ = []
for i in range(len(series)):
try:
series[i]._finish()
series_.append(series[i])
except Exception as err: # pragma: no cover
progressIndicator.write(str(err))
pass # Skip serie (probably report-like file without pixels)
# progressIndicator.set_progress(i+1)
progressIndicator.finish("Found %i correct series." % len(series_))
# Done
return series_
def splitSerieIfRequired(serie, series, progressIndicator):
"""
Split the serie in multiple series if this is required. The choice
is based on examing the image position relative to the previous
image. If it differs too much, it is assumed that there is a new
dataset. This can happen for example in unspitted gated CT data.
"""
# Sort the original list and get local name
serie._sort()
L = serie._entries
# Init previous slice
ds1 = L[0]
# Check whether we can do this
if "ImagePositionPatient" not in ds1:
return
# Initialize a list of new lists
L2 = [[ds1]]
# Init slice distance estimate
distance = 0
for index in range(1, len(L)):
# Get current slice
ds2 = L[index]
# Get positions
pos1 = float(ds1.ImagePositionPatient[2])
pos2 = float(ds2.ImagePositionPatient[2])
# Get distances
newDist = abs(pos1 - pos2)
# deltaDist = abs(firstPos-pos2)
# If the distance deviates more than 2x from what we've seen,
# we can agree it's a new dataset.
if distance and newDist > 2.1 * distance:
L2.append([])
distance = 0
else:
# Test missing file
if distance and newDist > 1.5 * distance:
progressIndicator.write(
"Warning: missing file after %r" % ds1._filename
)
distance = newDist
# Add to last list
L2[-1].append(ds2)
# Store previous
ds1 = ds2
# Split if we should
if len(L2) > 1:
# At what position are we now?
i = series.index(serie)
# Create new series
series2insert = []
for L in L2:
newSerie = DicomSeries(serie.suid, progressIndicator)
newSerie._entries = L
series2insert.append(newSerie)
# Insert series and remove self
for newSerie in reversed(series2insert):
series.insert(i, newSerie)
series.remove(serie)
| DicomSeries |
python | walkccc__LeetCode | solutions/1317. Convert Integer to the Sum of Two No-Zero Integers/1317.py | {
"start": 0,
"end": 177
} | class ____:
def getNoZeroIntegers(self, n: int) -> list[int]:
for A in range(n):
B = n - A
if '0' not in str(A) and '0' not in str(B):
return A, B
| Solution |
python | keras-team__keras | keras/src/legacy/saving/legacy_h5_format_test.py | {
"start": 2287,
"end": 3800
} | class ____(testing.TestCase):
def _check_reloading_weights(self, ref_input, model, tf_keras_model):
ref_output = tf_keras_model(ref_input)
initial_weights = model.get_weights()
# Check weights only file
temp_filepath = os.path.join(self.get_temp_dir(), "weights.h5")
tf_keras_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
output = model(ref_input)
self.assertAllClose(ref_output, output, atol=1e-5)
model.set_weights(initial_weights)
model.load_weights(temp_filepath)
output = model(ref_input)
self.assertAllClose(ref_output, output, atol=1e-5)
def test_sequential_model_weights(self):
model = get_sequential_model(keras)
tf_keras_model = get_sequential_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_weights(ref_input, model, tf_keras_model)
def test_functional_model_weights(self):
model = get_functional_model(keras)
tf_keras_model = get_functional_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_weights(ref_input, model, tf_keras_model)
def test_subclassed_model_weights(self):
model = get_subclassed_model(keras)
tf_keras_model = get_subclassed_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_weights(ref_input, model, tf_keras_model)
@pytest.mark.requires_trainable_backend
| LegacyH5WeightsTest |
python | scrapy__scrapy | tests/test_engine.py | {
"start": 1670,
"end": 2544
} | class ____(Spider):
name = "scrapytest.org"
itemurl_re = re.compile(r"item\d+.html")
name_re = re.compile(r"<h1>(.*?)</h1>", re.MULTILINE)
price_re = re.compile(r">Price: \$(.*?)<", re.MULTILINE)
item_cls: type = MyItem
def parse(self, response):
xlink = LinkExtractor()
itemre = re.compile(self.itemurl_re)
for link in xlink.extract_links(response):
if itemre.search(link.url):
yield Request(url=link.url, callback=self.parse_item)
def parse_item(self, response):
adapter = ItemAdapter(self.item_cls())
m = self.name_re.search(response.text)
if m:
adapter["name"] = m.group(1)
adapter["url"] = response.url
m = self.price_re.search(response.text)
if m:
adapter["price"] = m.group(1)
return adapter.item
| MySpider |
python | eth-brownie__brownie | brownie/_gui/root.py | {
"start": 2702,
"end": 3312
} | class ____(ttk.Frame):
def __init__(self, root):
super().__init__(root)
# geometry
self.columnconfigure(0, weight=1)
self.columnconfigure(1, minsize=280)
self.rowconfigure(0, weight=1)
self.rowconfigure(1, minsize=24)
self.oplist = OpcodeList(self, (("pc", 80), ("opcode", 200)))
self.oplist.grid(row=0, column=1, rowspan=2, sticky="nsew")
self.note = SourceNoteBook(self)
self.note.grid(row=0, column=0, sticky="nsew")
self.console = Console(self)
self.console.grid(row=1, column=0, sticky="nsew")
| MainFrame |
python | RaRe-Technologies__gensim | gensim/topic_coherence/text_analysis.py | {
"start": 13787,
"end": 14054
} | class ____(WordOccurrenceAccumulator):
"""Monkey patched for multiprocessing worker usage, to move some of the logic to the master process."""
def _iter_texts(self, texts):
return texts # master process will handle this
| PatchedWordOccurrenceAccumulator |
python | bokeh__bokeh | tests/support/util/screenshot.py | {
"start": 1673,
"end": 3546
} | class ____(TypedDict):
success: bool
timeout: float | None
image: JSImage
errors: list[JSError]
messages: list[JSMessage]
def run_in_chrome(url: str, local_wait: int | None = None, global_wait: int | None = None) -> JSResult:
return _run_in_browser(_get_chrome(), url, local_wait, global_wait)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _get_chrome() -> list[str]:
return ["node", join(dirname(__file__), "chrome_screenshot.js")]
def _run_in_browser(engine: list[str], url: str, local_wait: int | None = None, global_wait: int | None = None) -> JSResult:
"""
wait is in milliseconds
"""
cmd = [*engine, url]
if local_wait is not None:
cmd += [str(local_wait)]
if global_wait is not None:
cmd += [str(global_wait)]
trace(f"Running command: {' '.join(cmd)}")
env = os.environ.copy()
env["NODE_PATH"] = join(TOP_PATH, 'bokehjs', 'node_modules')
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
except OSError as e:
fail(f"Failed to run: {' '.join(cmd)}")
fail(str(e))
sys.exit(1)
with proc:
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
output = stderr.decode("utf-8")
fail(output)
sys.exit(1)
output = stdout.decode("utf-8")
return json.loads(output)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| JSResult |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 17905,
"end": 18232
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"SAML_EXTERNAL_IDENTITY_MISSING",
"SAML_SSO_ENFORCEMENT_REQUIRES_EXTERNAL_IDENTITY",
"TWO_FACTOR_REQUIREMENT_NON_COMPLIANCE",
)
| OrgRemoveBillingManagerAuditEntryReason |
python | paramiko__paramiko | paramiko/config.py | {
"start": 23861,
"end": 25192
} | class ____:
"""
Returns the host's fqdn on request as string.
"""
def __init__(self, config, host=None):
self.fqdn = None
self.config = config
self.host = host
def __str__(self):
if self.fqdn is None:
#
# If the SSH config contains AddressFamily, use that when
# determining the local host's FQDN. Using socket.getfqdn() from
# the standard library is the most general solution, but can
# result in noticeable delays on some platforms when IPv6 is
# misconfigured or not available, as it calls getaddrinfo with no
# address family specified, so both IPv4 and IPv6 are checked.
#
# Handle specific option
fqdn = None
results = _addressfamily_host_lookup(self.host, self.config)
if results is not None:
for res in results:
af, socktype, proto, canonname, sa = res
if canonname and "." in canonname:
fqdn = canonname
break
# Handle 'any' / unspecified / lookup failure
if fqdn is None:
fqdn = socket.getfqdn()
# Cache
self.fqdn = fqdn
return self.fqdn
| LazyFqdn |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 120704,
"end": 123798
} | class ____(PeekableMixinTests, TestCase):
cls = mi.seekable
def test_exhaustion_reset(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(list(s), iterable) # Normal iteration
self.assertEqual(list(s), []) # Iterable is exhausted
s.seek(0)
self.assertEqual(list(s), iterable) # Back in action
def test_partial_reset(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(mi.take(5, s), iterable[:5]) # Normal iteration
s.seek(1)
self.assertEqual(list(s), iterable[1:]) # Get the rest of the iterable
def test_forward(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(mi.take(1, s), iterable[:1]) # Normal iteration
s.seek(3) # Skip over index 2
self.assertEqual(list(s), iterable[3:]) # Result is similar to slicing
s.seek(0) # Back to 0
self.assertEqual(list(s), iterable) # No difference in result
def test_past_end(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(mi.take(1, s), iterable[:1]) # Normal iteration
s.seek(20)
self.assertEqual(list(s), []) # Iterable is exhausted
s.seek(0) # Back to 0
self.assertEqual(list(s), iterable) # No difference in result
def test_elements(self):
iterable = map(str, count())
s = mi.seekable(iterable)
mi.take(10, s)
elements = s.elements()
self.assertEqual(
[elements[i] for i in range(10)], [str(n) for n in range(10)]
)
self.assertEqual(len(elements), 10)
mi.take(10, s)
self.assertEqual(list(elements), [str(n) for n in range(20)])
def test_maxlen(self):
iterable = map(str, count())
s = mi.seekable(iterable, maxlen=4)
self.assertEqual(mi.take(10, s), [str(n) for n in range(10)])
self.assertEqual(list(s.elements()), ['6', '7', '8', '9'])
s.seek(0)
self.assertEqual(mi.take(14, s), [str(n) for n in range(6, 20)])
self.assertEqual(list(s.elements()), ['16', '17', '18', '19'])
def test_maxlen_zero(self):
iterable = [str(x) for x in range(5)]
s = mi.seekable(iterable, maxlen=0)
self.assertEqual(list(s), iterable)
self.assertEqual(list(s.elements()), [])
def test_relative_seek(self):
iterable = [str(x) for x in range(5)]
s = mi.seekable(iterable)
s.relative_seek(2)
self.assertEqual(next(s), '2')
s.relative_seek(-2)
self.assertEqual(next(s), '1')
s.relative_seek(-2)
self.assertEqual(
next(s), '0'
) # Seek relative to current position within the cache
s.relative_seek(-10) # Lower bound
self.assertEqual(next(s), '0')
s.relative_seek(10) # Lower bound
self.assertEqual(list(s.elements()), [str(x) for x in range(5)])
| SeekableTest |
python | numba__numba | numba/core/typed_passes.py | {
"start": 19929,
"end": 20280
} | class ____(BaseNativeLowering):
"""Lowering pass for a native function IR described using Numba's standard
`numba.core.ir` nodes and also parfor.Parfor nodes."""
_name = "native_parfor_lowering"
@property
def lowering_class(self):
return ParforLower
@register_pass(mutates_CFG=False, analysis_only=True)
| NativeParforLowering |
python | django__django | tests/admin_views/custom_has_permission_admin.py | {
"start": 441,
"end": 690
} | class ____(AuthenticationForm):
def confirm_login_allowed(self, user):
if not user.is_active or not (user.is_staff or user.has_perm(PERMISSION_NAME)):
raise ValidationError("permission denied")
| PermissionAdminAuthenticationForm |
python | celery__celery | t/unit/app/test_defaults.py | {
"start": 212,
"end": 1608
} | class ____:
def setup_method(self):
self._prev = sys.modules.pop('celery.app.defaults', None)
def teardown_method(self):
if self._prev:
sys.modules['celery.app.defaults'] = self._prev
def test_option_repr(self):
assert repr(NAMESPACES['broker']['url'])
def test_any(self):
val = object()
assert self.defaults.Option.typemap['any'](val) is val
def test_compat_indices(self):
assert not any(key.isupper() for key in DEFAULTS)
assert not any(key.islower() for key in _OLD_DEFAULTS)
assert not any(key.isupper() for key in _TO_OLD_KEY)
assert not any(key.islower() for key in _TO_NEW_KEY)
assert not any(key.isupper() for key in SETTING_KEYS)
assert not any(key.islower() for key in _OLD_SETTING_KEYS)
assert not any(value.isupper() for value in _TO_NEW_KEY.values())
assert not any(value.islower() for value in _TO_OLD_KEY.values())
for key in _TO_NEW_KEY:
assert key in _OLD_SETTING_KEYS
for key in _TO_OLD_KEY:
assert key in SETTING_KEYS
def test_find(self):
find = self.defaults.find
assert find('default_queue')[2].default == 'celery'
assert find('task_default_exchange')[2] is None
@property
def defaults(self):
return import_module('celery.app.defaults')
| test_defaults |
python | jazzband__django-oauth-toolkit | oauth2_provider/validators.py | {
"start": 190,
"end": 611
} | class ____(URLValidator):
scheme_re = r"^(?:[a-z][a-z0-9\.\-\+]*)://"
dotless_domain_re = r"(?!-)[A-Z\d-]{1,63}(?<!-)"
host_re = "|".join(
(r"(?:" + URLValidator.host_re, URLValidator.ipv4_re, URLValidator.ipv6_re, dotless_domain_re + ")")
)
port_re = r"(?::\d{2,5})?"
path_re = r"(?:[/?#][^\s]*)?"
regex = re.compile(scheme_re + host_re + port_re + path_re, re.IGNORECASE)
| URIValidator |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_test.py | {
"start": 8025,
"end": 56429
} | class ____(parameterized.TestCase, test.TestCase):
@parameterized.named_parameters(
("_enable_async_ckpt", True),
("_disable_async_ckpt", False)
)
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self, enable_async_ckpt):
v = _OwnsMirroredVariables()
checkpoint = trackable_utils.Checkpoint(v=v)
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_enable_async_checkpoint=enable_async_ckpt)
save_path = checkpoint.save(file_prefix=prefix, options=ckpt_options)
# TODO(chienchunh): Identify why sync needs to be called here.
if enable_async_ckpt:
checkpoint.sync()
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
self.evaluate(v.non_dep_variable.assign(44.))
save_path = checkpoint.save(file_prefix=prefix, options=ckpt_options)
# TODO(chienchunh): Identify why sync needs to be called here.
if enable_async_ckpt:
checkpoint.sync()
self.evaluate(v.non_dep_variable.assign(45.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
saver = saver_lib.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@parameterized.named_parameters(
("_enable_async_ckpt", True),
("_disable_async_ckpt", False)
)
@test_util.run_in_graph_and_eager_modes
def testAssertConsumedNoCheckpoint(self, enable_async_ckpt):
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(v.initializer)
ckpt = trackable_utils.Checkpoint(v=v)
self.evaluate(trackable_utils.gather_initializers(ckpt))
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_enable_async_checkpoint=enable_async_ckpt)
save_path = ckpt.save(file_prefix=prefix, options=ckpt_options)
status = ckpt.restore(save_path=save_path)
del ckpt
status.assert_consumed()
def testDeepCopyCheckpoint(self):
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variables_lib.Variable(1.)
original_ckpt = trackable_utils.Checkpoint(v=v)
copied_ckpt = copy.deepcopy(original_ckpt)
copied_ckpt.v.assign(2.)
self.assertAllClose(1., v)
save_path = copied_ckpt.save(file_prefix=prefix)
original_ckpt.restore(save_path=save_path).assert_consumed()
self.assertAllClose(2., v)
@test_util.run_in_graph_and_eager_modes
def testPassingCheckpointOptions(self):
localhost = "/job:localhost/device:CPU:0"
options = checkpoint_options.CheckpointOptions(
experimental_io_device=localhost)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(v.initializer)
ckpt = trackable_utils.Checkpoint(v=v)
self.evaluate(trackable_utils.gather_initializers(ckpt))
save_path = ckpt.save(file_prefix=prefix, options=options)
status = ckpt.restore(save_path=save_path, options=options)
del ckpt
status.assert_consumed()
# In graph mode, verify that the save and restore ops were set to run on
# localhost.
if not context.executing_eagerly():
for op in ops.get_default_graph().get_operations():
if op.type in ("SaveV2", "RestoreV2"):
self.assertEqual(localhost, op.device)
@test_util.run_in_graph_and_eager_modes
def testFreezing(self):
with test_util.use_gpu():
# Save an object-based checkpoint using a frozen saver
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
self.evaluate(v.assign(3))
# Create the save counter so assert_consumed doesn't complain about it not
# existing in the checkpoint on restore.
self.evaluate(checkpoint.save_counter.assign(12))
saver = trackable_utils.frozen_saver(checkpoint)
with ops.device("cpu:0"):
prefix_tensor = constant_op.constant(prefix)
self.evaluate(saver.save(prefix_tensor))
self.evaluate(v.assign(10))
# Use the frozen saver to restore the same object graph
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore using another frozen saver on an identical object graph
del v, checkpoint, saver
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
saver = trackable_utils.frozen_saver(checkpoint)
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore as an object-based checkpoint
del v, checkpoint, saver
checkpoint = trackable_utils.Checkpoint()
status = checkpoint.restore(prefix)
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
if context.executing_eagerly():
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
self.assertEqual(0, self.evaluate(v))
checkpoint.v = v
status.assert_consumed().run_restore_ops()
self.assertEqual(3, self.evaluate(v))
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
@parameterized.named_parameters(
("_enable_async_ckpt", True),
("_disable_async_ckpt", False)
)
@test_util.run_in_graph_and_eager_modes
def testCustomNumbering(self, enable_async_ckpt):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(step=step)
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_enable_async_checkpoint=enable_async_ckpt)
self.evaluate(step.initializer)
for i in range(5):
path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step)),
options=ckpt_options)
expected_suffix = "-%d" % (2 * i,)
if not path.endswith(expected_suffix):
self.fail("%s should have suffix %s" % (path, expected_suffix))
self.evaluate(step.assign_add(2))
def testPartialRestoreWarningAttribute(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
with test.mock.patch.object(logging, "warning") as mock_log:
# Note: Unlike in testPartialRestoreWarningObject, the warning actually
# prints immediately here, since all of the objects have been created
# and there's no deferred restoration sitting around.
partial_root.restore(save_path)
self.assertEqual(3., partial_root.v2.numpy())
del partial_root
self.assertIsNone(weak_partial_root())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v1", messages)
self.assertNotIn("(root).v2", messages)
self.assertIn("expect_partial()", messages)
  def testAttributeException(self):
    """assert_consumed raises when a saved attribute was never restored."""
    with context.eager_mode():
      original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
                                                 v2=variables_lib.Variable(3.))
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      save_path = original_root.save(prefix)
      # v1 is a bare Trackable with no variable, so the saved v1 value is
      # left unconsumed.
      partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
                                                v2=variables_lib.Variable(0.))
      status = partial_root.restore(save_path)
      with self.assertRaisesRegex(AssertionError,
                                  r"Unused attributes(.|\n)*\(root\).v1"):
        status.assert_consumed()
  def testSilencePartialWarning(self):
    """expect_partial() suppresses the unused-nodes warning on deletion."""
    with context.eager_mode():
      original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
                                                 v2=variables_lib.Variable(3.))
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      save_path = original_root.save(prefix)
      partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
      weak_partial_root = weakref.ref(partial_root)
      weak_v1 = weakref.ref(partial_root.v1)
      partial_root.restore(save_path).expect_partial()
      self.assertEqual(2., partial_root.v1.numpy())
      with test.mock.patch.object(logging, "warning") as mock_log:
        del partial_root
        self.assertIsNone(weak_partial_root())
        self.assertIsNone(weak_v1())
        # No warning should have been logged thanks to expect_partial().
        self.assertEmpty(mock_log.call_args_list)
def _get_checkpoint_name(self, name):
root = autotrackable.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
checkpoint_key = _get_all_checkpoint_names(root)[0]
with ops.name_scope("root/" + checkpoint_key):
pass # Make sure we can use this as an op name if we prefix it.
return checkpoint_key
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNumberedPath(self):
root = autotrackable.AutoTrackable()
leaf = autotrackable.AutoTrackable()
root.leaf = leaf
trackable_utils.add_variable(leaf, name="v", shape=[])
checkpoint_key = _get_all_checkpoint_names(root)[0]
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testLocalNameValidation(self):
root = autotrackable.AutoTrackable()
leaf = autotrackable.AutoTrackable()
# Dots are escaped, which avoids conflicts with reserved names.
root._track_trackable(leaf, name=".ATTRIBUTES")
trackable_utils.add_variable(trackable=leaf, name="a", shape=[])
checkpoint_key = _get_all_checkpoint_names(root)[0]
self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE",
checkpoint_key)
  @test_util.run_in_graph_and_eager_modes
  def testLateDependencyTracking(self):
    """A dependency attached after restore() still receives saved values."""
    class Dependency(autotrackable.AutoTrackable):
      def build(self):
        self.var = trackable_utils.add_variable(
            self, "var", initializer=0.)
    class LateDependencies(trackable_utils.Checkpoint):
      def add_dep(self):
        self.dep = Dependency()
        self.dep.build()
    original = LateDependencies()
    original.add_dep()
    self.evaluate(state_ops.assign(original.dep.var, 123.))
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = original.save(checkpoint_prefix)
    load_into = LateDependencies()
    status = load_into.restore(save_path)
    # All existing objects match, but `dep` does not exist yet, so the
    # checkpoint cannot be fully consumed.
    status.assert_existing_objects_matched()
    with self.assertRaises(AssertionError):
      status.assert_consumed()
    # Creating the dependency late triggers the deferred restoration.
    load_into.add_dep()
    status.assert_consumed()
    status.assert_existing_objects_matched().run_restore_ops()
    self.assertEqual(123., self.evaluate(load_into.dep.var))
  @test_util.run_in_graph_and_eager_modes
  def testDepAfterVar(self):
    """Restoration works when a dependency is attached after it is built."""
    class Dependency(autotrackable.AutoTrackable):
      def build(self):
        self.var = trackable_utils.add_variable(
            self, "var", initializer=0.)
    class DepAfterVar(trackable_utils.Checkpoint):
      def add_dep(self):
        dep = Dependency()
        dep.build()
        # Attach only after the variable already exists.
        self.dep = dep
    dep_after_var = DepAfterVar()
    dep_after_var.add_dep()
    self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = dep_after_var.save(checkpoint_prefix)
    loaded_dep_after_var = DepAfterVar()
    status = loaded_dep_after_var.restore(save_path)
    loaded_dep_after_var.add_dep()
    status.assert_consumed()
    status.run_restore_ops()
    self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
  @test_util.run_in_graph_and_eager_modes
  def testOverlappingRestores(self):
    """When two restores target one shared object, the later restore wins."""
    checkpoint_directory = self.get_temp_dir()
    save_root = trackable_utils.Checkpoint()
    save_root.dep = autotrackable.AutoTrackable()
    save_root.dep.var = trackable_utils.add_variable(
        save_root.dep, name="var", initializer=0.)
    # Save two checkpoints with different values (12. then 13.).
    self.evaluate(state_ops.assign(save_root.dep.var, 12.))
    first_path = save_root.save(os.path.join(checkpoint_directory, "first"))
    self.evaluate(state_ops.assign(save_root.dep.var, 13.))
    second_path = save_root.save(os.path.join(checkpoint_directory, "second"))
    first_root = trackable_utils.Checkpoint()
    second_root = trackable_utils.Checkpoint()
    first_status = first_root.restore(first_path)
    second_status = second_root.restore(second_path)
    load_dep = autotrackable.AutoTrackable()
    load_dep.var = trackable_utils.add_variable(
        load_dep, name="var", shape=[])
    # `load_dep` is shared between both restoring roots.
    first_root.dep = load_dep
    first_status.assert_consumed()
    first_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(load_dep.var))
    second_root.dep = load_dep
    second_status.assert_consumed()
    second_status.run_restore_ops()
    self.assertEqual(13., self.evaluate(load_dep.var))
    # Try again with the order of the restore() reversed. The last restore
    # determines the final value.
    first_root = trackable_utils.Checkpoint()
    second_root = trackable_utils.Checkpoint()
    second_status = second_root.restore(second_path)
    first_status = first_root.restore(first_path)
    load_dep = autotrackable.AutoTrackable()
    load_dep.var = trackable_utils.add_variable(
        load_dep, name="var", shape=[])
    first_root.dep = load_dep
    first_status.assert_consumed()
    first_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(load_dep.var))
    second_root.dep = load_dep
    second_status.assert_consumed()
    second_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(load_dep.var))
  @test_util.run_in_graph_and_eager_modes
  def testAmbiguousLoad(self):
    """One saved object cannot be split into two objects at load time."""
    # Not OK to split one checkpoint object into two
    checkpoint_directory = self.get_temp_dir()
    save_root = trackable_utils.Checkpoint()
    save_root.dep_one = autotrackable.AutoTrackable()
    save_root.dep_two = autotrackable.AutoTrackable()
    dep_three = autotrackable.AutoTrackable()
    # A single object is reachable through two paths when saving...
    save_root.dep_one.dep_three = dep_three
    save_root.dep_two.dep_three = dep_three
    trackable_utils.add_variable(dep_three, name="var", initializer=0.)
    self.evaluate(trackable_utils.gather_initializers(save_root))
    save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
    load_root = trackable_utils.Checkpoint()
    status = load_root.restore(save_path)
    load_root.dep_one = autotrackable.AutoTrackable()
    load_root.dep_two = autotrackable.AutoTrackable()
    # ...but two distinct objects at load time, which is ambiguous.
    load_root.dep_one.dep_three = autotrackable.AutoTrackable()
    load_root.dep_two.dep_three = autotrackable.AutoTrackable()
    trackable_utils.add_variable(
        load_root.dep_one.dep_three, name="var", initializer=0.)
    trackable_utils.add_variable(
        load_root.dep_two.dep_three, name="var", initializer=0.)
    with self.assertRaises(AssertionError):
      status.assert_consumed()
    with self.assertRaises(AssertionError):
      status.assert_existing_objects_matched()
  @test_util.run_in_graph_and_eager_modes
  def testObjectsCombined(self):
    """Two saved objects may map onto one Python object at load time."""
    # Currently fine to load two checkpoint objects into one Python object
    checkpoint_directory = self.get_temp_dir()
    save_root = trackable_utils.Checkpoint()
    save_root.dep_one = autotrackable.AutoTrackable()
    save_root.dep_two = autotrackable.AutoTrackable()
    trackable_utils.add_variable(
        save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
    trackable_utils.add_variable(
        save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
    self.evaluate(trackable_utils.gather_initializers(save_root))
    save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
    load_root = trackable_utils.Checkpoint()
    load_root.dep_one = autotrackable.AutoTrackable()
    # Both saved dependencies resolve to the same Python object.
    load_root.dep_two = load_root.dep_one
    v1 = trackable_utils.add_variable(
        load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
    v2 = trackable_utils.add_variable(
        load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
    status = load_root.restore(
        save_path).assert_consumed().assert_existing_objects_matched()
    status.run_restore_ops()
    self.assertEqual(32., self.evaluate(v1))
    self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def testEmptyContainersIgnored(self):
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint(a=[])
path = save_root.save(checkpoint_directory)
load_root = trackable_utils.Checkpoint(b=[])
load_root.dep = []
load_root.dep.append([])
status = load_root.restore(path)
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
  @test_util.run_in_graph_and_eager_modes
  def testDependencyLoop(self):
    """Save/restore works across a cyclic dependency graph."""
    # Note: this test creates garbage during eager execution because it
    # purposefully creates a reference cycle.
    first = trackable_utils.Checkpoint()
    second = trackable_utils.Checkpoint()
    first.second = second
    second.first = first
    first.v = trackable_utils.add_variable(
        first, "v1", initializer=[3., 1., 4.])
    second.v = trackable_utils.add_variable(
        second, "v2", initializer=[1., 1., 2., 3.])
    self.evaluate(trackable_utils.gather_initializers(first))
    checkpoint_directory = self.get_temp_dir()
    save_path = first.save(os.path.join(checkpoint_directory, "ckpt"))
    # Test deferred loading
    first_load = trackable_utils.Checkpoint()
    status = first_load.restore(save_path)
    second_load = autotrackable.AutoTrackable()
    first_load.second = second_load
    second_load.first = first_load
    # The variables don't exist yet, so the checkpoint can't be consumed.
    with self.assertRaises(AssertionError):
      status.assert_consumed()
    first_load.v = trackable_utils.add_variable(
        first_load, "v1", shape=[3])
    second_load.v = trackable_utils.add_variable(
        second_load, "v2", shape=[4])
    status.assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
    self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
    # Test loading when variables have already been created
    self.evaluate(first_load.v.assign([2., 7., 1.]))
    self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
    self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
    self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
    status = first_load.restore(save_path).assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
    self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
  @test_util.run_in_graph_and_eager_modes
  def testRestoreOnAssign(self):
    """Attaching a variable after restore() queues its restoration."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    first = trackable_utils.Checkpoint()
    first.var1 = variables_lib.Variable(0., name="outside_var")
    first.var2 = variables_lib.Variable(0., name="blah")
    self.evaluate(first.var1.assign(4.))
    self.evaluate(first.var2.assign(8.))
    save_path = first.save(checkpoint_prefix)
    second = trackable_utils.Checkpoint()
    second.var2 = variables_lib.Variable(0., name="blah")
    status = second.restore(save_path)
    recreated_var1 = variables_lib.Variable(0., name="outside_var")
    status.run_restore_ops()
    self.assertEqual(8., self.evaluate(second.var2))
    self.evaluate(recreated_var1.assign(-2.))
    self.assertEqual(-2., self.evaluate(recreated_var1))
    # Assigning the recreated variable to the restoring root queues its
    # restore; the next run_restore_ops overwrites the -2. with the saved 4.
    second.var1 = recreated_var1
    status.run_restore_ops()
    self.assertEqual(4., self.evaluate(recreated_var1))
@test_util.run_in_graph_and_eager_modes
def testCheckpointState(self):
# No checkpoints are deleted by default
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = autotrackable.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
saver = trackable_utils.Checkpoint(obj=obj)
for _ in range(10):
saver.save(checkpoint_prefix)
expected_filenames = ["checkpoint"]
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
  @test_util.run_in_graph_and_eager_modes
  def testCheckpointStateChangingVarList(self):
    """Checkpoint management stays consistent as the variable list grows."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    obj = autotrackable.AutoTrackable()
    obj.var = variable_scope.get_variable(name="v", initializer=0.)
    self.evaluate(trackable_utils.gather_initializers(obj))
    checkpoint = trackable_utils.Checkpoint(obj=obj)
    looped_variables = []
    # Add a new variable before each save, so every checkpoint has a
    # different variable list.
    for iteration in range(10):
      new_variable = resource_variable_ops.ResourceVariable(iteration)
      self.evaluate(new_variable.initializer)
      setattr(checkpoint, "var_%d" % iteration, new_variable)
      checkpoint.save(checkpoint_prefix)
      looped_variables.append(new_variable)
    expected_filenames = ["checkpoint"]
    # We've copied the saver each time, but checkpoint management should still
    # be consistent. Nothing gets deleted.
    for checkpoint_number in range(1, 11):
      expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
    self.assertEmpty(
        set(expected_filenames)
        - set(os.listdir(checkpoint_directory)))
    self.assertEqual(
        checkpoint_prefix + "-10",
        checkpoint_management.latest_checkpoint(checkpoint_directory))
    # The checkpoint list only contains the most recent checkpoint, but they're
    # all on disk. This means we won't eventually run into proto size limits.
    self.assertEqual(
        [checkpoint_prefix + "-10"],
        (checkpoint_management.get_checkpoint_state(checkpoint_directory)
         .all_model_checkpoint_paths))
    for v in looped_variables:
      self.evaluate(v.assign(314))
    # Checkpoint 6 only contains var_0..var_6; later variables keep 314.
    checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
    self.assertEqual(314, self.evaluate(checkpoint.var_9))
    self.assertEqual(314, self.evaluate(checkpoint.var_8))
    self.assertEqual(314, self.evaluate(checkpoint.var_6))
    self.assertEqual(5, self.evaluate(checkpoint.var_5))
    self.assertEqual(1, self.evaluate(checkpoint.var_1))
    self.assertEqual(0, self.evaluate(checkpoint.var_0))
    checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
    self.assertEqual(9, self.evaluate(checkpoint.var_9))
    self.assertEqual(8, self.evaluate(checkpoint.var_8))
    self.assertEqual(1, self.evaluate(checkpoint.var_1))
    self.assertEqual(0, self.evaluate(checkpoint.var_0))
  @test_util.run_in_graph_and_eager_modes
  def test_restore_after_adding_empty_trackable_data_structure(self):
    """Restoring still matches after new data structures are attached."""
    model = NonLayerTrackable()
    checkpoint = trackable_utils.Checkpoint(model=model)
    checkpoint.restore(None).initialize_or_restore()
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    del model, checkpoint
    model = NonLayerTrackable()
    model.dict = {"a": 1}
    # NOTE(review): despite the attribute name, this assigns a dict — confirm
    # whether a list was intended.
    model.list = {"b": 1}
    checkpoint = trackable_utils.Checkpoint(model=model)
    load_status = checkpoint.restore(save_path)
    load_status.assert_existing_objects_matched().run_restore_ops()
  @test_util.run_in_graph_and_eager_modes
  def test_write_checkpoint_path_str_from_function(self):
    """Checkpoint.write works with a Python-string path inside tf.function."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
    save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
    @def_function.function
    def _write_checkpoint():
      save_path = save_checkpoint.write(checkpoint_prefix)
      return save_path
    self.evaluate([save_checkpoint.v.initializer])
    self.evaluate(_write_checkpoint())
    load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.))
    # Use read() instead of restore() which allows us to check that all
    # existing objects were loaded.
    status = load_checkpoint.read(checkpoint_prefix)
    status.assert_existing_objects_matched()
    status.assert_consumed()
    status.run_restore_ops()
    self.assertEqual(1., self.evaluate(load_checkpoint.v))
    # A second write from the same traced function picks up the new value.
    self.evaluate(save_checkpoint.v.assign(3.))
    self.evaluate(_write_checkpoint())
    self.evaluate(save_checkpoint.v.assign(0.))
    status = load_checkpoint.read(checkpoint_prefix)
    status.assert_existing_objects_matched()
    status.assert_consumed()
    status.run_restore_ops()
    self.assertEqual(3., self.evaluate(load_checkpoint.v))
  @test_util.run_in_graph_and_eager_modes
  def test_write_checkpoint_path_tensor_from_function(self):
    """Checkpoint.write works with a tensor path inside tf.function."""
    # Same as the previous test, but the path is a tensor not a python string.
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
    checkpoint_prefix_tensor = constant_op.constant(checkpoint_prefix)
    save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
    @def_function.function
    def _write_checkpoint(prefix):
      save_path = save_checkpoint.write(prefix)
      return save_path
    self.evaluate([save_checkpoint.v.initializer])
    self.evaluate(_write_checkpoint(checkpoint_prefix_tensor))
    load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.))
    # Use read() instead of restore() which allows us to check that all
    # existing objects were loaded.
    status = load_checkpoint.read(checkpoint_prefix)
    status.assert_existing_objects_matched()
    status.assert_consumed()
    status.run_restore_ops()
    self.assertEqual(1., self.evaluate(load_checkpoint.v))
    self.evaluate(save_checkpoint.v.assign(3.))
    self.evaluate(_write_checkpoint(checkpoint_prefix_tensor))
    self.evaluate(save_checkpoint.v.assign(0.))
    status = load_checkpoint.read(checkpoint_prefix)
    status.assert_existing_objects_matched()
    status.assert_consumed()
    status.run_restore_ops()
    self.assertEqual(3., self.evaluate(load_checkpoint.v))
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_path_tensor_does_not_exist_from_function(self):
# Same as the previous test, but the path is a tensor not a python string.
checkpoint_prefix = os.path.join(
self.get_temp_dir(), "DOES_NOT_EXIST", "ckpt")
checkpoint_prefix_tensor = constant_op.constant(checkpoint_prefix)
save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
@def_function.function
def _write_checkpoint(prefix):
save_path = save_checkpoint.write(prefix)
return save_path
self.evaluate([save_checkpoint.v.initializer])
with self.assertRaises(errors_impl.NotFoundError):
self.evaluate(_write_checkpoint(checkpoint_prefix_tensor))
  @parameterized.named_parameters(
      ("_enable_async_ckpt", True),
      ("_disable_async_ckpt", False))
  def test_inititialize_with_data_structures(self, enable_async_ckpt):
    """Lists and dicts passed to the Checkpoint constructor round-trip."""
    # NOTE(review): the method name misspells "initialize"; left unchanged
    # because parameterized test IDs are derived from it.
    checkpoint = trackable_utils.Checkpoint(
        a=[variables_lib.Variable(0.), variables_lib.Variable(1.)],
        b={"a": variables_lib.Variable(2.), "b": variables_lib.Variable(3.)})
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    ckpt_options = checkpoint_options.CheckpointOptions(
        experimental_enable_async_checkpoint=enable_async_ckpt)
    save_path = checkpoint.save(file_prefix=checkpoint_prefix,
                                options=ckpt_options)
    load_checkpoint = trackable_utils.Checkpoint(
        a=[variables_lib.Variable(4.), variables_lib.Variable(5.)],
        b={"a": variables_lib.Variable(6.), "b": variables_lib.Variable(7.)})
    # When async checkpoint is enabled, we need to first make sure that the
    # checkpoint saving is fully complete before the checkpoint file can be
    # loaded by another checkpoint instance. Calling checkpoint.restore() is a
    # trick to make sure its async thread is joined.
    if enable_async_ckpt:
      checkpoint.restore(save_path)
    load_checkpoint.restore(save_path)
    self.assertAllClose(self.evaluate(load_checkpoint.a), [0, 1])
    self.assertAllClose(self.evaluate(load_checkpoint.b), {"a": 2, "b": 3})
def _create_trackable(self):
class Model(autotrackable.AutoTrackable):
def __init__(self):
self.v = variables_lib.Variable(2.)
def __call__(self, x):
return self.v * x
return Model()
  def test_initialize_with_root_object(self):
    """A positional root object saves and restores, including deferred vars."""
    model = self._create_trackable()
    input_value = constant_op.constant([[3.]])
    expected_output = self.evaluate(model(input_value))
    model.deferred_variable = variables_lib.Variable(5.)
    checkpoint = trackable_utils.Checkpoint(model)
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    new_model = self._create_trackable()
    load_checkpoint = trackable_utils.Checkpoint(new_model)
    load_checkpoint.restore(save_path)
    self.assertAllClose(expected_output, new_model(input_value))
    # Creating the attribute after restore() still picks up the saved 5.
    new_model.deferred_variable = variables_lib.Variable(1.)
    self.assertEqual(self.evaluate(new_model.deferred_variable), 5)
  def test_initialize_with_root_object_and_kwargs(self):
    """A root object may be combined with keyword attachments."""
    model = self._create_trackable()
    model.v.assign(3.)
    separate_variable = variables_lib.Variable(5.)
    # A kwarg may not collide with an attribute already on the root.
    with self.assertRaisesRegex(ValueError, "root.v already exists"):
      trackable_utils.Checkpoint(model, v=separate_variable)
    checkpoint = trackable_utils.Checkpoint(
        model, separate_variable=separate_variable)
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    # Case 1: Loading checkpoint with same configuration.
    new_model = self._create_trackable()
    separate_variable = variables_lib.Variable(1.)
    load_checkpoint = trackable_utils.Checkpoint(
        new_model, separate_variable=separate_variable)
    load_checkpoint.restore(save_path).assert_consumed()
    self.assertEqual(self.evaluate(new_model.v), 3)
    self.assertEqual(self.evaluate(separate_variable), 5)
    self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1)
    # Case 2: Loading checkpoint where v and separate_variable are swapped:
    # v is not attached to the root, while separate variable is attached to root
    new_model = autotrackable.AutoTrackable()
    new_model.separate_variable = variables_lib.Variable(200.)
    v = variables_lib.Variable(100.)
    load_checkpoint = trackable_utils.Checkpoint(new_model, v=v)
    load_checkpoint.restore(save_path).assert_consumed()
    self.assertEqual(self.evaluate(v), 3)
    self.assertEqual(self.evaluate(new_model.separate_variable), 5)
    self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1)
    # Case 3: Loading checkpoint where no root object is specified
    separate_variable = variables_lib.Variable(200.)
    v = variables_lib.Variable(100.)
    load_checkpoint = trackable_utils.Checkpoint(
        v=v, separate_variable=separate_variable)
    load_checkpoint.restore(save_path).assert_consumed()
    self.assertEqual(self.evaluate(v), 3)
    self.assertEqual(self.evaluate(new_model.separate_variable), 5)
    self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1)
  def test_checkpoint_saved_model_compatibility(self):
    """Checkpoint.restore can read the variables inside a SavedModel dir."""
    model = self._create_trackable()
    input_value = constant_op.constant([[3.]])
    expected_output = self.evaluate(model(input_value))
    model.deferred_variable = variables_lib.Variable(5.)
    saved_model_dir = os.path.join(self.get_temp_dir(), "saved_model")
    saved_model_save.save(model, saved_model_dir)
    new_model = self._create_trackable()
    load_checkpoint = trackable_utils.Checkpoint(new_model)
    # A nonexistent path raises with a combined checkpoint/SavedModel message.
    with self.assertRaisesRegex(
        errors_impl.NotFoundError,
        "Error when restoring from checkpoint or SavedModel"):
      load_checkpoint.restore(saved_model_dir + "no").expect_partial()
    load_checkpoint.restore(saved_model_dir).expect_partial()
    self.assertAllClose(expected_output, new_model(input_value))
    new_model.deferred_variable = variables_lib.Variable(1.)
    self.assertEqual(self.evaluate(new_model.deferred_variable), 5)
  def test_deferred_dependency_avoids_reference_cycles(self):
    """Objects with pending restores are freed by refcounting alone."""
    # Tests that there are no reference cycles when running garbage collection.
    # Python uses reference counts as the primary garbage collector, which will
    # not delete and finalize (__del__) objects in a cycle. The deletion is
    # eventually triggered by gc, which only runs when the garbage has reached
    # a certain threshold.
    delete_counter = 0
    class TrackableWithDel(autotrackable.AutoTrackable):
      def __del__(self):
        nonlocal delete_counter
        delete_counter += 1
    x = autotrackable.AutoTrackable()
    x.v = variables_lib.Variable(100.)
    x.has_del = TrackableWithDel()
    checkpoint = trackable_utils.Checkpoint(x)
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    self.assertEqual(delete_counter, 0)
    # __del__ must fire immediately on the last reference drop — a cycle
    # would defer it to the gc module.
    del checkpoint
    del x
    self.assertEqual(delete_counter, 1)
    no_v = autotrackable.AutoTrackable()
    no_v.has_del = TrackableWithDel()
    checkpoint = trackable_utils.Checkpoint(no_v)
    checkpoint.restore(save_path).expect_partial()
    del checkpoint
    del no_v
    self.assertEqual(delete_counter, 2)
def test_defer_objects_with_values_only(self):
# Tests that deferred dependencies are only added if the node in the
# object graph has children or checkpointed values.
root = autotrackable.AutoTrackable()
root.branch_with_value = autotrackable.AutoTrackable()
root.branch_with_value.v = variables_lib.Variable(5.0)
root.branch_no_value = autotrackable.AutoTrackable()
root.branch_no_value.child = autotrackable.AutoTrackable()
root.v = variables_lib.Variable(1.0)
checkpoint = trackable_utils.Checkpoint(model=root)
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
new_root = autotrackable.AutoTrackable()
checkpoint = trackable_utils.Checkpoint(model=new_root)
checkpoint.restore(save_path)
# root should have two nodes with values/children (`branch-with_value`/`v`).
self.assertLen(new_root._deferred_dependencies, 2)
new_root.branch_no_value = autotrackable.AutoTrackable()
self.assertLen(new_root._deferred_dependencies, 2)
new_root.branch_with_value = autotrackable.AutoTrackable()
self.assertLen(new_root._deferred_dependencies, 1)
new_root.v = variables_lib.Variable(1.0)
self.assertEmpty(new_root._deferred_dependencies, 1)
  def test_root_arg(self):
    """Root attributes and kwarg attachments restore across checkpoints."""
    root = autotrackable.AutoTrackable()
    root.v = variables_lib.Variable(1)
    w = variables_lib.Variable(2)
    y = variables_lib.Variable(3)
    root_ckpt = trackable_utils.Checkpoint(root=root, w=w, y=y)
    root2 = autotrackable.AutoTrackable()
    root2.w = variables_lib.Variable(4)
    v2 = variables_lib.Variable(5)
    z = variables_lib.Variable(6)
    root2_ckpt = trackable_utils.Checkpoint(root=root2,
                                            v=v2,
                                            z=z)
    root_save_path = root_ckpt.save(os.path.join(self.get_temp_dir(),
                                                 "root_ckpt"))
    root2_save_path = root2_ckpt.save(os.path.join(self.get_temp_dir(),
                                                   "root2_ckpt"))
    # Cross-restore: names match whether they live on the root or as kwargs;
    # unmatched variables (y, z) keep their current values.
    root_ckpt.restore(root2_save_path)
    root2_ckpt.restore(root_save_path)
    self.assertEqual(root.v.numpy(), 5)
    self.assertEqual(w.numpy(), 4)
    self.assertEqual(y.numpy(), 3)
    self.assertEqual(root2.w.numpy(), 2)
    self.assertEqual(v2.numpy(), 1)
    self.assertEqual(z.numpy(), 6)
  def test_weakref_root(self):
    """A weakref-wrapped root works and does not keep the object alive."""
    root = autotrackable.AutoTrackable()
    root.v = variables_lib.Variable(1)
    ref = root.v.ref()
    ckpt = trackable_utils.Checkpoint(root=weakref.ref(root))
    save_path = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt"))
    root.v.assign(2)
    ckpt.restore(save_path)
    self.assertEqual(root.v.numpy(), 1)
    del root
    # Verifying if the variable is only referenced from `ref`.
    # We expect the reference counter to be 1, but `sys.getrefcount` reports
    # one higher reference counter because a temporary is created when we call
    # sys.getrefcount(). Hence check if the number returned is 2.
    # https://docs.python.org/3/library/sys.html#sys.getrefcount
    self.assertEqual(sys.getrefcount(ref.deref()), 2)
def test_restore_incompatible_shape(self):
v = variables_lib.Variable([1.0, 1.0])
w = variables_lib.Variable([1.0])
ckpt = trackable_utils.Checkpoint(v=v)
save_path = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt"))
with self.assertRaisesRegex(ValueError, "incompatible tensor with shape"):
trackable_utils.Checkpoint(v=w).restore(save_path)
def test_save_restore_fspath(self):
v = variables_lib.Variable(1.0)
w = variables_lib.Variable(0.0)
ckpt = trackable_utils.Checkpoint(v=v)
prefix = pathlib.Path(self.get_temp_dir()) / "ckpt"
save_path = ckpt.save(prefix)
save_path = pathlib.Path(save_path)
ckpt2 = trackable_utils.Checkpoint(v=w)
ckpt2.restore(save_path)
self.assertEqual(ckpt.v.numpy(), 1.0)
def test_read_write_fspath(self):
v = variables_lib.Variable(1.0)
w = variables_lib.Variable(0.0)
ckpt = trackable_utils.Checkpoint(v=v)
prefix = pathlib.Path(self.get_temp_dir()) / "ckpt"
save_path = ckpt.write(prefix)
save_path = pathlib.Path(save_path)
ckpt2 = trackable_utils.Checkpoint(v=w)
ckpt2.read(save_path)
self.assertEqual(ckpt.v.numpy(), 1.0)
  @test_util.run_deprecated_v1
  def test_save_in_graph_but_no_session(self):
    """Writing in graph mode without a default session raises RuntimeError."""
    v = variables_lib.Variable(1.0)
    ckpt = trackable_utils.Checkpoint(v=v)
    self.evaluate(v.initializer)
    prefix = pathlib.Path(self.get_temp_dir()) / "ckpt"
    # Explicitly clear the default session before writing.
    with stack.default_session(None):
      with self.assertRaisesRegex(RuntimeError, "create a session"):
        ckpt.write(prefix)
  def test_ckpt_files_closed_after_restoration(self):
    """No checkpoint file handles stay open after restore() completes."""
    if not psutil_import_succeeded:
      self.skipTest(
          "psutil is required to check that we've closed our files.")
    root = autotrackable.AutoTrackable()
    root.v = variables_lib.Variable(1)
    ckpt = trackable_utils.Checkpoint(root=root)
    save_path = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt"))
    root2 = autotrackable.AutoTrackable()
    ckpt2 = trackable_utils.Checkpoint(root=root2)
    ckpt2.restore(save_path)
    # Inspect this process's open file descriptors via psutil.
    proc = psutil.Process()
    for file in proc.open_files():
      self.assertNotIn(save_path, file[0])
  def testLookupCache(self):
    """Ensure that Checkpoint.restore passes cached dependencies to lookup."""
    root = autotrackable.AutoTrackable()
    root.v1 = variables_lib.Variable(1)
    root.v2 = variables_lib.Variable(2)
    root.v3 = variables_lib.Variable(3)
    ckpt = trackable_utils.Checkpoint(model=root)
    save_path = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt"))
    called_with_cache = []
    # Override _lookup_dependency to record which names were resolved with a
    # non-None cached_dependencies argument.
    class LookupOverride(autotrackable.AutoTrackable):
      def _lookup_dependency(self, name, cached_dependencies=None):
        if cached_dependencies is not None:
          called_with_cache.append(name)
        return super()._lookup_dependency(name, cached_dependencies)
    root2 = LookupOverride()
    ckpt2 = trackable_utils.Checkpoint(model=root2)
    ckpt2.restore(save_path)
    self.assertCountEqual(called_with_cache, ["v1", "v2", "v3"])
  @parameterized.named_parameters(
      ("_enable_async_ckpt", True),
      ("_disable_async_ckpt", False)
  )
  def testCallbackWithManager(self, enable_async_ckpt):
    """Tests experimental_write_callback with a checkpoint manager."""
    # 1. Define checkpoint and manager accordingly
    v = variables_lib.Variable(1.)
    if enable_async_ckpt:
      ckpt = async_checkpoint_helper.AsyncCheckpointHelper(
          trackable_utils.Checkpoint,
          v=v
      )
    else:
      ckpt = trackable_utils.Checkpoint(v=v)
    checkpoint_manager = checkpoint_management.CheckpointManager(
        checkpoint=ckpt,
        directory=os.path.join(self.get_temp_dir(), "ckpt"),
        max_to_keep=None,
        checkpoint_name="test-callback",
    )
    # 2. Define 2 callbacks, which will be executed in order as stated in the
    # expected behavior of `CheckpointOptions.experimental_write_callbacks`
    testing_list = []
    test_str = "callback 2 was here"
    # Define callback 1 that takes in 1 argument (receives the save path)
    def my_callback_1(save_path):
      testing_list.append(save_path)
    # Define callback 2 that takes in 0 argument
    def my_callback_2():
      testing_list.append(test_str)
    # 3. Save with `options`
    options = checkpoint_options.CheckpointOptions(
        experimental_write_callbacks=[my_callback_1, my_callback_2]
    )
    save_path = checkpoint_manager.save(options=options)
    # 4. Assert results
    if enable_async_ckpt:
      checkpoint_manager.sync()  # otherwise callbacks may not have finished
    # Ensure that user's options is not mutated by internal mechanisms. Here,
    # we would internally register a callback `_record_and_sweep_state()`.
    # Users should not have access to it, hence length still being 2.
    self.assertLen(options.experimental_write_callbacks, 2)
    # Ensure `_record_and_sweep_state()` executes and sets `_latest_checkpoint`
    self.assertEqual(save_path, checkpoint_manager._latest_checkpoint)
    # Ensure my_callback_1 is executed first
    self.assertEqual(save_path, testing_list[0])
    # Ensure my_callback_2 is executed second
    self.assertEqual(test_str, testing_list[1])
    # Ensure nothing else is written to `testing_list`
    self.assertLen(testing_list, 2)
@parameterized.named_parameters(
    ("_async_ckpt_save", True, False, True),
    ("_async_ckpt_write", True, False, False),
    ("_regular_ckptV1_save", False, True, True),
    ("_regular_ckptV1_write", False, True, False),
    ("_regular_ckptV2_save", False, False, True),
    ("_regular_ckptV2_write", False, False, False)
)
def testCallbackWithoutManager(self, enable_async_ckpt, use_v1, use_save):
  """Tests experimental_write_callback without using a checkpoint manager."""
  # The async helper's save()/write() delegate to the underlying
  # Checkpoint.save()/Checkpoint.write() respectively.
  variable = variables_lib.Variable(1.)
  prefix = os.path.join(self.get_temp_dir(), "ckpt")
  if enable_async_ckpt:
    checkpoint = async_checkpoint_helper.AsyncCheckpointHelper(
        trackable_utils.Checkpoint,
        v=variable
    )
  elif use_v1:
    checkpoint = trackable_utils.CheckpointV1(v=variable)
  else:
    checkpoint = trackable_utils.Checkpoint(v=variable)

  # Two callbacks registered in order; each appends a sentinel to `events`
  # so the documented execution order of
  # `CheckpointOptions.experimental_write_callbacks` can be asserted.
  events = []
  marker = "callback 2 was here"

  # Callback taking the single `save_path` argument.
  def first_callback(save_path):
    events.append(save_path)

  # Callback taking no arguments.
  def second_callback():
    events.append(marker)

  options = checkpoint_options.CheckpointOptions(
      experimental_write_callbacks=[first_callback, second_callback]
  )
  writer = checkpoint.save if use_save else checkpoint.write
  save_path = writer(prefix, options=options)

  if enable_async_ckpt:
    checkpoint.sync()  # otherwise callbacks may not have finished

  # Regular Checkpoint.save() internally registers
  # `_update_checkpoint_state_internal()`; that must not be visible to the
  # user, so the user-supplied list still holds exactly the two callbacks.
  self.assertLen(options.experimental_write_callbacks, 2)
  # Callbacks ran in registration order, and nothing else was recorded.
  self.assertEqual(save_path, events[0])
  self.assertEqual(marker, events[1])
  self.assertLen(events, 2)
def test_callback_argument_error(self):
  """Ensure passing in a callback with more than 1 argument raises error."""
  # A one-argument callback is the accepted shape.
  def valid_callback(save_path):
    return save_path

  # A two-argument callback violates the contract and must be rejected.
  def invalid_callback(save_path, another_argument):
    return save_path, another_argument

  # Constructing the options with the bad callback fails up front.
  with self.assertRaises(AssertionError):
    _ = checkpoint_options.CheckpointOptions(
        experimental_write_callbacks=[valid_callback, invalid_callback]
    )
def test_checkpoint_options_copyable(self):
  """Ensure that `CheckpointOptions` can be copied with `copy.deepcopy()`."""
  # NOTE(review): the docstring says `copy.deepcopy()` but the body exercises
  # `copy.copy()` — confirm which copy protocol this test is meant to cover.
  def my_callback(save_path):
    return save_path

  def my_callback_2(save_path):
    return save_path + "some string"

  options_original = checkpoint_options.CheckpointOptions(
      experimental_io_device="CPU:0",
      enable_async=True,
      experimental_write_callbacks=[my_callback]
  )
  options_copy = copy.copy(options_original)
  # Mutate only the copy; every assertion below targets the original.
  options_copy.enable_async = False
  options_copy.experimental_io_device = "CPU:1"
  options_copy.experimental_write_callbacks.append(my_callback_2)
  # Check that the original options instance is not affected
  self.assertEqual(options_original.experimental_io_device, "CPU:0")
  self.assertEqual(options_original.enable_async, True)
  # Appending to the copy's callback list did not grow the original's list,
  # so the list itself was duplicated by the copy — presumably via a custom
  # copy implementation on CheckpointOptions (TODO confirm).
  self.assertLen(options_original.experimental_write_callbacks, 1)
| CheckpointingTests |
python | PyCQA__pylint | tests/functional/m/mixin_class_rgx.py | {
"start": 126,
"end": 250
} | class ____:
"""Class that does not match the option pattern"""
def __aenter__(self):
pass
| AsyncManagerMixedin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.