language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/tests/test_ray_event_export_task_events.py | {
"start": 39032,
"end": 52844
} | class ____:
def __init__(self):
pass
def task(self, arg):
pass
actor = Actor.remote()
obj = ray.put("test")
ray.get(actor.task.remote(obj))
"""
def validate_events(events: json):
(
driver_script_job_id,
driver_task_id,
) = get_job_id_and_driver_script_task_id_from_events(
events, preserve_proto_field_name
)
driver_task_definition_received = False
actor_creation_task_definition_received = False
actor_task_definition_received = False
for event in events:
if preserve_proto_field_name:
if event["event_type"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(event, preserve_proto_field_name)
if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert (
event["task_definition_event"]["task_type"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["task_definition_event"][
"task_id"
]
assert actor_creation_task_id is not None
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_name"]
== "__init__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["task_definition_event"]["task_name"]
== "Actor.__init__"
)
assert event["task_definition_event"][
"required_resources"
] == {"CPU": 1.0}
assert (
event["task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert (
event["task_definition_event"]["job_id"]
== driver_script_job_id
)
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
elif event["event_type"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actor_task_definition_event"]["task_id"]
assert actor_task_id is not None
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_name"]
== "task"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["actor_task_definition_event"]["actor_task_name"]
== "Actor.task"
)
assert (
event["actor_task_definition_event"]["required_resources"]
== {}
)
assert (
event["actor_task_definition_event"]["job_id"]
== driver_script_job_id
)
assert (
event["actor_task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert event["actor_task_definition_event"]["task_attempt"] == 0
assert (
event["actor_task_definition_event"]["language"] == "PYTHON"
)
else:
assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
else:
if event["eventType"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(event, preserve_proto_field_name)
if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert (
event["taskDefinitionEvent"]["taskType"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["taskDefinitionEvent"][
"taskId"
]
assert actor_creation_task_id is not None
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "__init__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["taskDefinitionEvent"]["taskName"]
== "Actor.__init__"
)
assert event["taskDefinitionEvent"][
"requiredResources"
] == {"CPU": 1.0}
assert (
event["taskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert (
event["taskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
elif event["eventType"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actorTaskDefinitionEvent"]["taskId"]
assert actor_task_id is not None
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "task"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["actorTaskDefinitionEvent"]["actorTaskName"]
== "Actor.task"
)
assert (
event["actorTaskDefinitionEvent"]["requiredResources"] == {}
)
assert (
event["actorTaskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert (
event["actorTaskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert event["actorTaskDefinitionEvent"]["taskAttempt"] == 0
assert event["actorTaskDefinitionEvent"]["language"] == "PYTHON"
else:
assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
assert driver_task_definition_received
assert actor_creation_task_definition_received
assert actor_task_definition_received
expected_driver_task_states = {"RUNNING", "FINISHED"}
expected_actor_creation_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"RUNNING",
"FINISHED",
}
expected_actor_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"SUBMITTED_TO_WORKER",
"PENDING_ACTOR_TASK_ARGS_FETCH",
"PENDING_ACTOR_TASK_ORDERING_OR_CONCURRENCY",
"RUNNING",
"FINISHED",
}
expected_task_id_states_dict = {
(driver_task_id, 0): expected_driver_task_states,
(actor_creation_task_id, 0): expected_actor_creation_task_states,
(actor_task_id, 0): expected_actor_task_states,
}
expected_task_id_error_info_dict = {}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
expected_task_id_error_info_dict,
preserve_proto_field_name,
)
run_driver_script_and_wait_for_events(
script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
)
@_cluster_with_aggregator_target
def test_actor_creation_failed(
self,
ray_start_cluster_head_with_env_vars,
httpserver,
preserve_proto_field_name,
):
script = """
import ray
import ray.util.state
from ray._common.test_utils import wait_for_condition
import time
@ray.remote(num_cpus=1)
| Actor |
python | ipython__ipython | tests/test_interactiveshell.py | {
"start": 1787,
"end": 21077
} | class ____(unittest.TestCase):
def test_naked_string_cells(self):
"""Test that cells with only naked strings are fully executed"""
# First, single-line inputs
ip.run_cell('"a"\n')
self.assertEqual(ip.user_ns["_"], "a")
# And also multi-line cells
ip.run_cell('"""a\nb"""\n')
self.assertEqual(ip.user_ns["_"], "a\nb")
def test_run_empty_cell(self):
"""Just make sure we don't get a horrible error with a blank
cell of input. Yes, I did overlook that."""
old_xc = ip.execution_count
res = ip.run_cell("")
self.assertEqual(ip.execution_count, old_xc)
self.assertEqual(res.execution_count, None)
def test_run_cell_multiline(self):
"""Multi-block, multi-line cells must execute correctly."""
src = "\n".join(
[
"x=1",
"y=2",
"if 1:",
" x += 1",
" y += 1",
]
)
res = ip.run_cell(src)
self.assertEqual(ip.user_ns["x"], 2)
self.assertEqual(ip.user_ns["y"], 3)
self.assertEqual(res.success, True)
self.assertEqual(res.result, None)
def test_multiline_string_cells(self):
"Code sprinkled with multiline strings should execute (GH-306)"
ip.run_cell("tmp=0")
self.assertEqual(ip.user_ns["tmp"], 0)
res = ip.run_cell('tmp=1;"""a\nb"""\n')
self.assertEqual(ip.user_ns["tmp"], 1)
self.assertEqual(res.success, True)
self.assertEqual(res.result, "a\nb")
def test_dont_cache_with_semicolon(self):
"Ending a line with semicolon should not cache the returned object (GH-307)"
oldlen = len(ip.user_ns["Out"])
for cell in ["1;", "1;1;"]:
res = ip.run_cell(cell, store_history=True)
newlen = len(ip.user_ns["Out"])
self.assertEqual(oldlen, newlen)
self.assertIsNone(res.result)
i = 0
# also test the default caching behavior
for cell in ["1", "1;1"]:
ip.run_cell(cell, store_history=True)
newlen = len(ip.user_ns["Out"])
i += 1
self.assertEqual(oldlen + i, newlen)
def test_syntax_error(self):
res = ip.run_cell("raise = 3")
self.assertIsInstance(res.error_before_exec, SyntaxError)
def test_open_standard_input_stream(self):
res = ip.run_cell("open(0)")
self.assertIsInstance(res.error_in_exec, ValueError)
def test_open_standard_output_stream(self):
res = ip.run_cell("open(1)")
self.assertIsInstance(res.error_in_exec, ValueError)
def test_open_standard_error_stream(self):
res = ip.run_cell("open(2)")
self.assertIsInstance(res.error_in_exec, ValueError)
def test_In_variable(self):
"Verify that In variable grows with user input (GH-284)"
oldlen = len(ip.user_ns["In"])
ip.run_cell("1;", store_history=True)
newlen = len(ip.user_ns["In"])
self.assertEqual(oldlen + 1, newlen)
self.assertEqual(ip.user_ns["In"][-1], "1;")
def test_magic_names_in_string(self):
ip.run_cell('a = """\n%exit\n"""')
self.assertEqual(ip.user_ns["a"], "\n%exit\n")
def test_trailing_newline(self):
"""test that running !(command) does not raise a SyntaxError"""
ip.run_cell("!(true)\n", False)
ip.run_cell("!(true)\n\n\n", False)
def test_gh_597(self):
"""Pretty-printing lists of objects with non-ascii reprs may cause
problems."""
class Spam(object):
def __repr__(self):
return "\xe9" * 50
import IPython.core.formatters
f = IPython.core.formatters.PlainTextFormatter()
f([Spam(), Spam()])
def test_future_flags(self):
"""Check that future flags are used for parsing code (gh-777)"""
ip.run_cell("from __future__ import barry_as_FLUFL")
try:
ip.run_cell("prfunc_return_val = 1 <> 2")
assert "prfunc_return_val" in ip.user_ns
finally:
# Reset compiler flags so we don't mess up other tests.
ip.compile.reset_compiler_flags()
def test_can_pickle(self):
"Can we pickle objects defined interactively (GH-29)"
ip = get_ipython()
ip.reset()
ip.run_cell(
(
"class Mylist(list):\n"
" def __init__(self,x=[]):\n"
" list.__init__(self,x)"
)
)
ip.run_cell("w=Mylist([1,2,3])")
from pickle import dumps
# We need to swap in our main module - this is only necessary
# inside the test framework, because IPython puts the interactive module
# in place (but the test framework undoes this).
_main = sys.modules["__main__"]
sys.modules["__main__"] = ip.user_module
try:
res = dumps(ip.user_ns["w"])
finally:
sys.modules["__main__"] = _main
self.assertTrue(isinstance(res, bytes))
def test_global_ns(self):
"Code in functions must be able to access variables outside them."
ip = get_ipython()
ip.run_cell("a = 10")
ip.run_cell(("def f(x):\n" " return x + a"))
ip.run_cell("b = f(12)")
self.assertEqual(ip.user_ns["b"], 22)
def test_bad_custom_tb(self):
"""Check that InteractiveShell is protected from bad custom exception handlers"""
ip.set_custom_exc((IOError,), lambda etype, value, tb: 1 / 0)
self.assertEqual(ip.custom_exceptions, (IOError,))
with tt.AssertPrints("Custom TB Handler failed", channel="stderr"):
ip.run_cell('raise IOError("foo")')
self.assertEqual(ip.custom_exceptions, ())
def test_bad_custom_tb_return(self):
"""Check that InteractiveShell is protected from bad return types in custom exception handlers"""
ip.set_custom_exc((NameError,), lambda etype, value, tb, tb_offset=None: 1)
self.assertEqual(ip.custom_exceptions, (NameError,))
with tt.AssertPrints("Custom TB Handler failed", channel="stderr"):
ip.run_cell("a=abracadabra")
self.assertEqual(ip.custom_exceptions, ())
def test_drop_by_id(self):
myvars = {"a": object(), "b": object(), "c": object()}
ip.push(myvars, interactive=False)
for name in myvars:
assert name in ip.user_ns, name
assert name in ip.user_ns_hidden, name
ip.user_ns["b"] = 12
ip.drop_by_id(myvars)
for name in ["a", "c"]:
assert name not in ip.user_ns, name
assert name not in ip.user_ns_hidden, name
assert ip.user_ns["b"] == 12
ip.reset()
def test_var_expand(self):
ip.user_ns["f"] = "Ca\xf1o"
self.assertEqual(ip.var_expand("echo $f"), "echo Ca\xf1o")
self.assertEqual(ip.var_expand("echo {f}"), "echo Ca\xf1o")
self.assertEqual(ip.var_expand("echo {f[:-1]}"), "echo Ca\xf1")
self.assertEqual(ip.var_expand("echo {1*2}"), "echo 2")
self.assertEqual(
ip.var_expand("grep x | awk '{print $1}'"), "grep x | awk '{print $1}'"
)
ip.user_ns["f"] = b"Ca\xc3\xb1o"
# This should not raise any exception:
ip.var_expand("echo $f")
def test_var_expand_local(self):
"""Test local variable expansion in !system and %magic calls"""
# !system
ip.run_cell(
"def test():\n"
' lvar = "ttt"\n'
" ret = !echo {lvar}\n"
" return ret[0]\n"
)
res = ip.user_ns["test"]()
self.assertIn("ttt", res)
# %magic
ip.run_cell(
"def makemacro():\n"
' macroname = "macro_var_expand_locals"\n'
" %macro {macroname} codestr\n"
)
ip.user_ns["codestr"] = "str(12)"
ip.run_cell("makemacro()")
self.assertIn("macro_var_expand_locals", ip.user_ns)
def test_var_expand_self(self):
"""Test variable expansion with the name 'self', which was failing.
See https://github.com/ipython/ipython/issues/1878#issuecomment-7698218
"""
ip.run_cell(
"class cTest:\n"
' classvar="see me"\n'
" def test(self):\n"
" res = !echo Variable: {self.classvar}\n"
" return res[0]\n"
)
self.assertIn("see me", ip.user_ns["cTest"]().test())
def test_bad_var_expand(self):
"""var_expand on invalid formats shouldn't raise"""
# SyntaxError
self.assertEqual(ip.var_expand("{'a':5}"), "{'a':5}")
# NameError
self.assertEqual(ip.var_expand("{asdf}"), "{asdf}")
# ZeroDivisionError
self.assertEqual(ip.var_expand("{1/0}"), "{1/0}")
def test_silent_postexec(self):
"""run_cell(silent=True) doesn't invoke pre/post_run_cell callbacks"""
pre_explicit = mock.Mock()
pre_always = mock.Mock()
post_explicit = mock.Mock()
post_always = mock.Mock()
all_mocks = [pre_explicit, pre_always, post_explicit, post_always]
ip.events.register("pre_run_cell", pre_explicit)
ip.events.register("pre_execute", pre_always)
ip.events.register("post_run_cell", post_explicit)
ip.events.register("post_execute", post_always)
try:
ip.run_cell("1", silent=True)
assert pre_always.called
assert not pre_explicit.called
assert post_always.called
assert not post_explicit.called
# double-check that non-silent exec did what we expected
# silent to avoid
ip.run_cell("1")
assert pre_explicit.called
assert post_explicit.called
(info,) = pre_explicit.call_args[0]
(result,) = post_explicit.call_args[0]
self.assertEqual(info, result.info)
# check that post hooks are always called
[m.reset_mock() for m in all_mocks]
ip.run_cell("syntax error")
assert pre_always.called
assert pre_explicit.called
assert post_always.called
assert post_explicit.called
(info,) = pre_explicit.call_args[0]
(result,) = post_explicit.call_args[0]
self.assertEqual(info, result.info)
finally:
# remove post-exec
ip.events.unregister("pre_run_cell", pre_explicit)
ip.events.unregister("pre_execute", pre_always)
ip.events.unregister("post_run_cell", post_explicit)
ip.events.unregister("post_execute", post_always)
def test_silent_noadvance(self):
"""run_cell(silent=True) doesn't advance execution_count"""
ec = ip.execution_count
# silent should force store_history=False
ip.run_cell("1", store_history=True, silent=True)
self.assertEqual(ec, ip.execution_count)
# double-check that non-silent exec did what we expected
# silent to avoid
ip.run_cell("1", store_history=True)
self.assertEqual(ec + 1, ip.execution_count)
def test_silent_nodisplayhook(self):
"""run_cell(silent=True) doesn't trigger displayhook"""
d = dict(called=False)
trap = ip.display_trap
save_hook = trap.hook
def failing_hook(*args, **kwargs):
d["called"] = True
try:
trap.hook = failing_hook
res = ip.run_cell("1", silent=True)
self.assertFalse(d["called"])
self.assertIsNone(res.result)
# double-check that non-silent exec did what we expected
# silent to avoid
ip.run_cell("1")
self.assertTrue(d["called"])
finally:
trap.hook = save_hook
def test_ofind_line_magic(self):
from IPython.core.magic import register_line_magic
@register_line_magic
def lmagic(line):
"A line magic"
# Get info on line magic
lfind = ip._ofind("lmagic")
info = OInfo(
found=True,
isalias=False,
ismagic=True,
namespace="IPython internal",
obj=lmagic,
parent=None,
)
self.assertEqual(lfind, info)
def test_ofind_cell_magic(self):
from IPython.core.magic import register_cell_magic
@register_cell_magic
def cmagic(line, cell):
"A cell magic"
# Get info on cell magic
find = ip._ofind("cmagic")
info = OInfo(
found=True,
isalias=False,
ismagic=True,
namespace="IPython internal",
obj=cmagic,
parent=None,
)
self.assertEqual(find, info)
def test_ofind_property_with_error(self):
class A(object):
@property
def foo(self):
raise NotImplementedError() # pragma: no cover
a = A()
found = ip._ofind("a.foo", [("locals", locals())])
info = OInfo(
found=True,
isalias=False,
ismagic=False,
namespace="locals",
obj=A.foo,
parent=a,
)
self.assertEqual(found, info)
def test_ofind_multiple_attribute_lookups(self):
class A(object):
@property
def foo(self):
raise NotImplementedError() # pragma: no cover
a = A()
a.a = A()
a.a.a = A()
found = ip._ofind("a.a.a.foo", [("locals", locals())])
info = OInfo(
found=True,
isalias=False,
ismagic=False,
namespace="locals",
obj=A.foo,
parent=a.a.a,
)
self.assertEqual(found, info)
def test_ofind_slotted_attributes(self):
class A(object):
__slots__ = ["foo"]
def __init__(self):
self.foo = "bar"
a = A()
found = ip._ofind("a.foo", [("locals", locals())])
info = OInfo(
found=True,
isalias=False,
ismagic=False,
namespace="locals",
obj=a.foo,
parent=a,
)
self.assertEqual(found, info)
found = ip._ofind("a.bar", [("locals", locals())])
expected = OInfo(
found=False,
isalias=False,
ismagic=False,
namespace=None,
obj=None,
parent=a,
)
assert found == expected
def test_ofind_prefers_property_to_instance_level_attribute(self):
class A(object):
@property
def foo(self):
return "bar"
a = A()
a.__dict__["foo"] = "baz"
self.assertEqual(a.foo, "bar")
found = ip._ofind("a.foo", [("locals", locals())])
self.assertIs(found.obj, A.foo)
def test_custom_syntaxerror_exception(self):
called = []
def my_handler(shell, etype, value, tb, tb_offset=None):
called.append(etype)
shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
ip.set_custom_exc((SyntaxError,), my_handler)
try:
ip.run_cell("1f")
# Check that this was called, and only once.
self.assertEqual(called, [SyntaxError])
finally:
# Reset the custom exception hook
ip.set_custom_exc((), None)
def test_custom_exception(self):
called = []
def my_handler(shell, etype, value, tb, tb_offset=None):
called.append(etype)
shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
ip.set_custom_exc((ValueError,), my_handler)
try:
res = ip.run_cell("raise ValueError('test')")
# Check that this was called, and only once.
self.assertEqual(called, [ValueError])
# Check that the error is on the result object
self.assertIsInstance(res.error_in_exec, ValueError)
finally:
# Reset the custom exception hook
ip.set_custom_exc((), None)
@mock.patch("builtins.print")
def test_showtraceback_with_surrogates(self, mocked_print):
values = []
def mock_print_func(value, sep=" ", end="\n", file=sys.stdout, flush=False):
values.append(value)
if value == chr(0xD8FF):
raise UnicodeEncodeError("utf-8", chr(0xD8FF), 0, 1, "")
# mock builtins.print
mocked_print.side_effect = mock_print_func
# ip._showtraceback() is replaced in globalipapp.py.
# Call original method to test.
interactiveshell.InteractiveShell._showtraceback(ip, None, None, chr(0xD8FF))
self.assertEqual(mocked_print.call_count, 2)
self.assertEqual(values, [chr(0xD8FF), "\\ud8ff"])
def test_mktempfile(self):
filename = ip.mktempfile()
# Check that we can open the file again on Windows
with open(filename, "w", encoding="utf-8") as f:
f.write("abc")
filename = ip.mktempfile(data="blah")
with open(filename, "r", encoding="utf-8") as f:
self.assertEqual(f.read(), "blah")
def test_new_main_mod(self):
# Smoketest to check that this accepts a unicode module name
name = "jiefmw"
mod = ip.new_main_mod("%s.py" % name, name)
self.assertEqual(mod.__name__, name)
def test_get_exception_only(self):
try:
raise KeyboardInterrupt
except KeyboardInterrupt:
msg = ip.get_exception_only()
self.assertEqual(msg, "KeyboardInterrupt\n")
try:
raise DerivedInterrupt("foo")
except KeyboardInterrupt:
msg = ip.get_exception_only()
self.assertEqual(msg, "tests.test_interactiveshell.DerivedInterrupt: foo\n")
def test_inspect_text(self):
ip.run_cell("a = 5")
text = ip.object_inspect_text("a")
self.assertIsInstance(text, str)
def test_last_execution_result(self):
"""Check that last execution result gets set correctly (GH-10702)"""
result = ip.run_cell("a = 5; a")
self.assertTrue(ip.last_execution_succeeded)
self.assertEqual(ip.last_execution_result.result, 5)
result = ip.run_cell("a = x_invalid_id_x")
self.assertFalse(ip.last_execution_succeeded)
self.assertFalse(ip.last_execution_result.success)
self.assertIsInstance(ip.last_execution_result.error_in_exec, NameError)
def test_reset_aliasing(self):
"""Check that standard posix aliases work after %reset."""
if os.name != "posix":
return
ip.reset()
for cmd in ("clear", "more", "less", "man"):
res = ip.run_cell("%" + cmd)
self.assertEqual(res.success, True)
@pytest.mark.skipif(
sys.implementation.name == "pypy"
and ((7, 3, 13) < sys.implementation.version < (7, 3, 16)),
reason="Unicode issues with scandir on PyPy, see https://github.com/pypy/pypy/issues/4860",
)
| InteractiveShellTestCase |
python | getsentry__sentry | src/sentry/models/debugfile.py | {
"start": 22615,
"end": 23788
} | class ____:
@property
def cache_path(self) -> str:
return options.get("dsym.cache-path")
def get_project_path(self, project: Project) -> str:
return os.path.join(self.cache_path, str(project.id))
def fetch_difs(
self, project: Project, debug_ids: Iterable[str], features: Iterable[str] | None = None
) -> Mapping[str, str]:
"""Given some ids returns an id to path mapping for where the
debug symbol files are on the FS.
"""
debug_ids = [str(debug_id).lower() for debug_id in debug_ids]
difs = ProjectDebugFile.objects.find_by_debug_ids(project, debug_ids, features)
rv = {}
for debug_id, dif in difs.items():
dif_path = os.path.join(self.get_project_path(project), debug_id)
try:
os.stat(dif_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
dif.file.save_to(dif_path)
rv[debug_id] = dif_path
return rv
def clear_old_entries(self) -> None:
clear_cached_files(self.cache_path)
ProjectDebugFile.difcache = DIFCache()
| DIFCache |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/redis.py | {
"start": 33,
"end": 265
} | class ____(BaseModel, extra="allow"):
enabled: bool
internal: bool
usePassword: bool
password: str
host: str
port: int
brokerDbNumber: int
backendDbNumber: int
brokerUrl: str
backendUrl: str
| Redis |
python | walkccc__LeetCode | solutions/2176. Count Equal and Divisible Pairs in an Array/2176.py | {
"start": 0,
"end": 481
} | class ____:
def countPairs(self, nums: list[int], k: int) -> int:
ans = 0
numToIndices = collections.defaultdict(list)
for i, num in enumerate(nums):
numToIndices[num].append(i)
for indices in numToIndices.values():
gcds = collections.Counter()
for i in indices:
gcd_i = math.gcd(i, k)
for gcd_j, count in gcds.items():
if gcd_i * gcd_j % k == 0:
ans += count
gcds[gcd_i] += 1
return ans
| Solution |
python | openai__openai-python | src/openai/lib/streaming/responses/_events.py | {
"start": 2847,
"end": 2958
} | class ____(RawResponseFunctionCallArgumentsDeltaEvent):
snapshot: str
| ResponseFunctionCallArgumentsDeltaEvent |
python | streamlit__streamlit | lib/tests/streamlit/elements/alert_test.py | {
"start": 8290,
"end": 10305
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall Alert proto."""
def test_st_warning(self):
"""Test st.warning."""
st.warning("some warning")
el = self.get_delta_from_queue().new_element
assert el.alert.body == "some warning"
assert el.alert.format == Alert.WARNING
assert (
el.alert.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.alert.width_config.use_stretch
def test_st_warning_with_icon(self):
"""Test st.warning with icon."""
st.warning("some warning", icon="⚠️")
el = self.get_delta_from_queue().new_element
assert el.alert.body == "some warning"
assert el.alert.icon == "⚠️"
assert el.alert.format == Alert.WARNING
assert (
el.alert.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.alert.width_config.use_stretch
def test_st_warning_with_width_pixels(self):
"""Test st.warning with width in pixels."""
st.warning("some warning", width=500)
el = self.get_delta_from_queue().new_element
assert el.alert.body == "some warning"
assert el.alert.format == Alert.WARNING
assert (
el.alert.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.alert.width_config.pixel_width == 500
def test_st_warning_with_width_stretch(self):
"""Test st.warning with width set to stretch."""
st.warning("some warning", width="stretch")
el = self.get_delta_from_queue().new_element
assert el.alert.body == "some warning"
assert el.alert.format == Alert.WARNING
assert (
el.alert.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.alert.width_config.use_stretch
| StWarningAPITest |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 16382,
"end": 18949
} | class ____(Operation):
def __init__(self, axis=None, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x1, x2):
return backend.numpy.append(x1, x2, axis=self.axis)
def compute_output_spec(self, x1, x2):
x1_shape = x1.shape
x2_shape = x2.shape
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
if self.axis is None:
if None in x1_shape or None in x2_shape:
output_shape = [None]
else:
output_shape = [int(np.prod(x1_shape) + np.prod(x2_shape))]
return KerasTensor(output_shape, dtype=dtype)
if not shape_equal(x1_shape, x2_shape, [self.axis]):
raise ValueError(
"`append` requires inputs to have the same shape except the "
f"`axis={self.axis}`, but received shape {x1_shape} and "
f"{x2_shape}."
)
output_shape = list(x1_shape)
output_shape[self.axis] = x1_shape[self.axis] + x2_shape[self.axis]
return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.append", "keras.ops.numpy.append"])
def append(
x1,
x2,
axis=None,
):
"""Append tensor `x2` to the end of tensor `x1`.
Args:
x1: First input tensor.
x2: Second input tensor.
axis: Axis along which tensor `x2` is appended to tensor `x1`.
If `None`, both tensors are flattened before use.
Returns:
A tensor with the values of `x2` appended to `x1`.
Examples:
>>> x1 = keras.ops.convert_to_tensor([1, 2, 3])
>>> x2 = keras.ops.convert_to_tensor([[4, 5, 6], [7, 8, 9]])
>>> keras.ops.append(x1, x2)
array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)
When `axis` is specified, `x1` and `x2` must have compatible shapes.
>>> x1 = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
>>> x2 = keras.ops.convert_to_tensor([[7, 8, 9]])
>>> keras.ops.append(x1, x2, axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=int32)
>>> x3 = keras.ops.convert_to_tensor([7, 8, 9])
>>> keras.ops.append(x1, x3, axis=0)
Traceback (most recent call last):
...
TypeError: Cannot concatenate arrays with different numbers of
dimensions: got (2, 3), (3,).
"""
if any_symbolic_tensors((x1, x2)):
return Append(axis=axis).symbolic_call(x1, x2)
return backend.numpy.append(x1, x2, axis=axis)
| Append |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 18612,
"end": 19019
} | class ____(sgqlc.types.Enum):
"""The possible values for an enabled/no policy enterprise setting.
Enumeration Choices:
* `ENABLED`: The setting is enabled for organizations in the
enterprise.
* `NO_POLICY`: There is no policy set for organizations in the
enterprise.
"""
__schema__ = github_schema
__choices__ = ("ENABLED", "NO_POLICY")
| EnterpriseEnabledSettingValue |
python | pallets__jinja | tests/test_regression.py | {
"start": 293,
"end": 2089
} | class ____:
def test_assigned_scoping(self, env):
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{{- item -}}
"""
)
assert t.render(item=42) == "[1][2][3][4]42"
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{%- set item = 42 %}
{{- item -}}
"""
)
assert t.render() == "[1][2][3][4]42"
t = env.from_string(
"""
{%- set item = 42 %}
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{{- item -}}
"""
)
assert t.render() == "[1][2][3][4]42"
def test_closure_scoping(self, env):
t = env.from_string(
"""
{%- set wrapper = "<FOO>" %}
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{{- wrapper -}}
"""
)
assert t.render() == "[1][2][3][4]<FOO>"
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{%- set wrapper = "<FOO>" %}
{{- wrapper -}}
"""
)
assert t.render() == "[1][2][3][4]<FOO>"
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{{- wrapper -}}
"""
)
assert t.render(wrapper=23) == "[1][2][3][4]23"
| TestCorner |
python | pytorch__pytorch | test/distributed/test_c10d_logger.py | {
"start": 1300,
"end": 4555
} | class ____(DistributedTestBase):
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
def test_get_or_create_logger(self):
self.assertIsNotNone(_c10d_logger)
self.assertEqual(1, len(_c10d_logger.handlers))
self.assertIsInstance(_c10d_logger.handlers[0], logging.NullHandler)
@_exception_logger
def _failed_broadcast_raise_exception(self):
tensor = torch.arange(2, dtype=torch.int64)
dist.broadcast(tensor, self.world_size + 1)
@_exception_logger
def _failed_broadcast_not_raise_exception(self):
try:
tensor = torch.arange(2, dtype=torch.int64)
dist.broadcast(tensor, self.world_size + 1)
except Exception:
pass
@with_comms
def test_exception_logger(self) -> None:
with self.assertRaises(Exception):
self._failed_broadcast_raise_exception()
with self.assertLogs(_c10d_logger, level="DEBUG") as captured:
self._failed_broadcast_not_raise_exception()
error_msg_dict = json.loads(
re.search("({.+})", captured.output[0]).group(0).replace("'", '"')
)
# NCCL adds additional nccl_version data to the error_msg_dict
if self.backend(device_type) == dist.Backend.NCCL:
self.assertEqual(len(error_msg_dict), 9)
else:
self.assertEqual(len(error_msg_dict), 8)
self.assertIn("pg_name", error_msg_dict.keys())
self.assertEqual("None", error_msg_dict["pg_name"])
self.assertIn("func_name", error_msg_dict.keys())
self.assertEqual("broadcast", error_msg_dict["func_name"])
self.assertIn("backend", error_msg_dict.keys())
self.assertEqual(self.backend(device_type), error_msg_dict["backend"])
if self.backend(device_type) == dist.Backend.NCCL:
self.assertIn("nccl_version", error_msg_dict.keys())
nccl_ver = torch.cuda.nccl.version()
self.assertEqual(
".".join(str(v) for v in nccl_ver), error_msg_dict["nccl_version"]
)
# In this test case, group_size = world_size, since we don't have multiple processes on one node.
self.assertIn("group_size", error_msg_dict.keys())
self.assertEqual(str(self.world_size), error_msg_dict["group_size"])
self.assertIn("world_size", error_msg_dict.keys())
self.assertEqual(str(self.world_size), error_msg_dict["world_size"])
self.assertIn("global_rank", error_msg_dict.keys())
self.assertIn(str(dist.get_rank()), error_msg_dict["global_rank"])
# In this test case, local_rank = global_rank, since we don't have multiple processes on one node.
self.assertIn("local_rank", error_msg_dict.keys())
self.assertIn(str(dist.get_rank()), error_msg_dict["local_rank"])
if __name__ == "__main__":
run_tests()
| C10dErrorLoggerTest |
python | dateutil__dateutil | src/dateutil/tz/win.py | {
"start": 1257,
"end": 3793
} | class ____(object):
"""
Class for accessing ``tzres.dll``, which contains timezone name related
resources.
.. versionadded:: 2.5.0
"""
p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
def __init__(self, tzres_loc='tzres.dll'):
# Load the user32 DLL so we can load strings from tzres
user32 = ctypes.WinDLL('user32')
# Specify the LoadStringW function
user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
wintypes.UINT,
wintypes.LPWSTR,
ctypes.c_int)
self.LoadStringW = user32.LoadStringW
self._tzres = ctypes.WinDLL(tzres_loc)
self.tzres_loc = tzres_loc
def load_name(self, offset):
"""
Load a timezone name from a DLL offset (integer).
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.load_name(112))
'Eastern Standard Time'
:param offset:
A positive integer value referring to a string from the tzres dll.
.. note::
Offsets found in the registry are generally of the form
``@tzres.dll,-114``. The offset in this case is 114, not -114.
"""
resource = self.p_wchar()
lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
return resource[:nchar]
def name_from_string(self, tzname_str):
"""
Parse strings as returned from the Windows registry into the time zone
name as defined in the registry.
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.name_from_string('@tzres.dll,-251'))
'Dateline Daylight Time'
>>> print(tzr.name_from_string('Eastern Standard Time'))
'Eastern Standard Time'
:param tzname_str:
A timezone name string as returned from a Windows registry key.
:return:
Returns the localized timezone string from tzres.dll if the string
is of the form `@tzres.dll,-offset`, else returns the input string.
"""
if not tzname_str.startswith('@'):
return tzname_str
name_splt = tzname_str.split(',-')
try:
offset = int(name_splt[1])
except:
raise ValueError("Malformed timezone string.")
return self.load_name(offset)
| tzres |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass10.py | {
"start": 150,
"end": 616
} | class ____(ABC):
@staticmethod
@abstractmethod
def method1() -> None: ...
@staticmethod
@abstractmethod
def method2() -> None:
pass
@classmethod
@abstractmethod
def method3(cls) -> None:
raise NotImplementedError
@classmethod
@abstractmethod
def method4(cls) -> None:
pass
# This should generate an error.
A.method1()
A.method2()
# This should generate an error.
A.method3()
A.method4()
| A |
python | getsentry__sentry | src/sentry/models/release_threshold/release_threshold.py | {
"start": 452,
"end": 1765
} | class ____(Model):
"""
NOTE:
To transition to utilizing AlertRules, there are some duplicated attrs we'll want to dedup.
AlertRule model should house metadata on the AlertRule itself (eg. type of alert rule)
AlertRuleTrigger model should house the trigger requirements (eg. value, over/under trigger type)
- TODO: Will need to determine how this translates to release_threshold evaluation
QuerySubscription model subscribes the AlertRule to specific query in Snuba
SnubaQuery model represents the actual query run in Snuba
- TODO: replace query constructed in release_thresholds api with activated SnubaQuery / determine whether we're constructing the same query or not
"""
__relocation_scope__ = RelocationScope.Excluded
threshold_type = BoundedPositiveIntegerField(choices=ReleaseThresholdType.as_choices())
trigger_type = BoundedPositiveIntegerField(choices=ReleaseThresholdTriggerType.as_choices())
value = models.IntegerField()
window_in_seconds = models.PositiveIntegerField()
project = FlexibleForeignKey("sentry.Project", db_index=True, related_name="release_thresholds")
environment = FlexibleForeignKey("sentry.Environment", null=True, db_index=True)
date_added = models.DateTimeField(default=timezone.now)
| ReleaseThreshold |
python | huggingface__transformers | src/transformers/models/informer/modular_informer.py | {
"start": 2433,
"end": 2894
} | class ____(PreTrainedModel):
config: InformerConfig
base_model_prefix = "model"
main_input_name = "past_values"
input_modalities = ("time",)
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module: nn.Module):
super()._init_weights(module)
if isinstance(module, InformerSinusoidalPositionalEmbedding):
init.copy_(module.weight, module.create_weight())
| InformerPreTrainedModel |
python | ray-project__ray | rllib/utils/replay_buffers/reservoir_replay_buffer.py | {
"start": 452,
"end": 4533
} | class ____(ReplayBuffer):
"""This buffer implements reservoir sampling.
The algorithm has been described by Jeffrey S. Vitter in "Random sampling
with a reservoir".
"""
def __init__(
self, capacity: int = 10000, storage_unit: str = "timesteps", **kwargs
):
"""Initializes a ReservoirBuffer instance.
Args:
capacity: Max number of timesteps to store in the FIFO
buffer. After reaching this number, older samples will be
dropped to make space for new ones.
storage_unit: Either 'timesteps', 'sequences' or
'episodes'. Specifies how experiences are stored.
"""
ReplayBuffer.__init__(self, capacity, storage_unit)
self._num_add_calls = 0
self._num_evicted = 0
@ExperimentalAPI
@override(ReplayBuffer)
def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
"""Add a SampleBatch of experiences to self._storage.
An item consists of either one or more timesteps, a sequence or an
episode. Differs from add() in that it does not consider the storage
unit or type of batch and simply stores it.
Args:
item: The batch to be added.
``**kwargs``: Forward compatibility kwargs.
"""
self._num_timesteps_added += item.count
self._num_timesteps_added_wrap += item.count
# Update add counts.
self._num_add_calls += 1
# Update our timesteps counts.
if self._num_timesteps_added < self.capacity:
self._storage.append(item)
self._est_size_bytes += item.size_bytes()
else:
# Eviction of older samples has already started (buffer is "full")
self._eviction_started = True
idx = random.randint(0, self._num_add_calls - 1)
if idx < len(self._storage):
self._num_evicted += 1
self._evicted_hit_stats.push(self._hit_count[idx])
self._hit_count[idx] = 0
# This is a bit of a hack: ReplayBuffer always inserts at
# self._next_idx
self._next_idx = idx
self._evicted_hit_stats.push(self._hit_count[idx])
self._hit_count[idx] = 0
item_to_be_removed = self._storage[idx]
self._est_size_bytes -= item_to_be_removed.size_bytes()
self._storage[idx] = item
self._est_size_bytes += item.size_bytes()
assert item.count > 0, item
warn_replay_capacity(item=item, num_items=self.capacity / item.count)
@ExperimentalAPI
@override(ReplayBuffer)
def stats(self, debug: bool = False) -> dict:
"""Returns the stats of this buffer.
Args:
debug: If True, adds sample eviction statistics to the returned
stats dict.
Returns:
A dictionary of stats about this buffer.
"""
data = {
"num_evicted": self._num_evicted,
"num_add_calls": self._num_add_calls,
}
parent = ReplayBuffer.stats(self, debug)
parent.update(data)
return parent
@ExperimentalAPI
@override(ReplayBuffer)
def get_state(self) -> Dict[str, Any]:
"""Returns all local state.
Returns:
The serializable local state.
"""
parent = ReplayBuffer.get_state(self)
parent.update(self.stats())
return parent
@ExperimentalAPI
@override(ReplayBuffer)
def set_state(self, state: Dict[str, Any]) -> None:
"""Restores all local state to the provided `state`.
Args:
state: The new state to set this buffer. Can be
obtained by calling `self.get_state()`.
"""
self._num_evicted = state["num_evicted"]
self._num_add_calls = state["num_add_calls"]
ReplayBuffer.set_state(self, state)
# __sphinx_doc_reservoir_buffer__end__
| ReservoirReplayBuffer |
python | pytorch__pytorch | torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py | {
"start": 11509,
"end": 11852
} | class ____(enum.Enum):
PICKLE = 0
TORCH = 1
TORCH_JIT = 2
_GLOBALS_ALLOWED_TYPES: dict[Serialization, tuple[Any, ...]] = {
Serialization.PICKLE: (str, bytes, bool, int, float, complex),
Serialization.TORCH_JIT: (torch.jit.ScriptFunction, torch.jit.ScriptModule),
Serialization.TORCH: (torch.nn.Module,),
}
| Serialization |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol37.py | {
"start": 248,
"end": 331
} | class ____(metaclass=StyleMeta):
pass
x: type[Style] = Style
print(list(x))
| Style |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_dialect.py | {
"start": 9983,
"end": 13972
} | class ____(fixtures.TestBase):
"""mock test for encoding_errors.
While we tried to write a round trip test, I could only reproduce the
problem on Python 3 and only for STRING/CHAR. I couldn't get a CLOB to
come back with broken encoding and also under py2k cx_Oracle would always
return a bytestring with the correct encoding. Since the test barely
worked, it is not included here to avoid future problems. It's not clear
what other levels of encode/decode are going on such that explicitly
selecting for AL16UTF16 is still returning a utf-8 bytestring under py2k or
for CLOBs, nor is it really clear that this flag is useful, however, at
least for the Py3K case, cx_Oracle supports the flag and we did have one
user reporting that they had a (non-reproducible) database which
illustrated the problem so we will pass it in.
"""
# NOTE: these numbers are arbitrary, they are not the actual
# cx_Oracle constants
cx_Oracle_NUMBER = 0
cx_Oracle_STRING = 1
cx_Oracle_FIXED_CHAR = 2
cx_Oracle_CLOB = 3
cx_Oracle_NCLOB = 4
@testing.fixture
def cx_Oracle(self):
return mock.Mock(
NUMBER=self.cx_Oracle_NUMBER,
STRING=self.cx_Oracle_STRING,
FIXED_CHAR=self.cx_Oracle_FIXED_CHAR,
CLOB=self.cx_Oracle_CLOB,
NCLOB=self.cx_Oracle_NCLOB,
version="8.0.1",
__future__=mock.Mock(),
)
_oracle_char_combinations = testing.combinations(
(
"STRING",
cx_Oracle_STRING,
),
(
"FIXED_CHAR",
cx_Oracle_FIXED_CHAR,
),
(
"CLOB",
cx_Oracle_CLOB,
),
(
"NCLOB",
cx_Oracle_NCLOB,
),
argnames="cx_oracle_type",
id_="ia",
)
_dialect = testing.combinations(
cx_oracle.dialect, oracledb.dialect, argnames="dialect_cls"
)
def _assert_errorhandler(self, outconverter, has_errorhandler):
data = "\uee2c\u9a66" # this is u"\uee2c\u9a66"
utf8_w_errors = data.encode("utf-16")
if has_errorhandler:
eq_(
outconverter(utf8_w_errors),
data.encode("utf-16").decode("utf-8", "ignore"),
)
else:
assert_raises(UnicodeDecodeError, outconverter, utf8_w_errors)
@_oracle_char_combinations
@_dialect
def test_encoding_errors_cx_oracle(
self, cx_Oracle, cx_oracle_type, dialect_cls
):
ignore_dialect = dialect_cls(dbapi=cx_Oracle, encoding_errors="ignore")
ignore_outputhandler = (
ignore_dialect._generate_connection_outputtype_handler()
)
cursor = mock.Mock()
ignore_outputhandler(cursor, "foo", cx_oracle_type, None, None, None)
eq_(
cursor.mock_calls,
[
mock.call.var(
mock.ANY,
mock.ANY,
cursor.arraysize,
encodingErrors="ignore",
)
],
)
@_oracle_char_combinations
@_dialect
def test_no_encoding_errors_cx_oracle(
self, cx_Oracle, cx_oracle_type, dialect_cls
):
plain_dialect = dialect_cls(dbapi=cx_Oracle)
plain_outputhandler = (
plain_dialect._generate_connection_outputtype_handler()
)
cursor = mock.Mock()
plain_outputhandler(cursor, "foo", cx_oracle_type, None, None, None)
if cx_oracle_type in (cx_Oracle.FIXED_CHAR, cx_Oracle.STRING):
# no calls; without encodingErrors, use cx_Oracle's default unicode
# handling
eq_(
cursor.mock_calls,
[],
)
else:
eq_(
cursor.mock_calls,
[mock.call.var(mock.ANY, mock.ANY, cursor.arraysize)],
)
| EncodingErrorsTest |
python | simplejson__simplejson | setup.py | {
"start": 2734,
"end": 4759
} | class ____(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys
import subprocess
raise SystemExit(
subprocess.call([sys.executable,
# Turn on deprecation warnings
'-Wd',
'simplejson/tests/__init__.py']))
def run_setup(with_binary):
cmdclass = dict(test=TestCommand)
if with_binary:
kw = dict(
ext_modules=[
Extension("simplejson._speedups", ["simplejson/_speedups.c"]),
],
cmdclass=dict(cmdclass, build_ext=ve_build_ext),
)
else:
kw = dict(cmdclass=cmdclass)
setup(
name="simplejson",
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
python_requires=PYTHON_REQUIRES,
author="Bob Ippolito",
author_email="bob@redivi.com",
url="https://github.com/simplejson/simplejson",
license="MIT License",
packages=['simplejson', 'simplejson.tests'],
platforms=['any'],
**kw)
DISABLE_SPEEDUPS = IS_PYPY or IS_GRAALPY or os.environ.get('DISABLE_SPEEDUPS') == '1'
CIBUILDWHEEL = os.environ.get('CIBUILDWHEEL') == '1'
REQUIRE_SPEEDUPS = CIBUILDWHEEL or os.environ.get('REQUIRE_SPEEDUPS') == '1'
try:
run_setup(not DISABLE_SPEEDUPS)
except BuildFailed:
if REQUIRE_SPEEDUPS:
raise
BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, "
"speedups are not enabled.")
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Failure information, if any, is above.")
print("I'm retrying the build without the C extension now.")
print('*' * 75)
run_setup(False)
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Plain-Python installation succeeded.")
print('*' * 75)
| TestCommand |
python | cherrypy__cherrypy | cherrypy/lib/reprconf.py | {
"start": 7430,
"end": 11983
} | class ____:
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise TypeError(
'unrepr does not recognize %s' % repr(o.__class__.__name__),
)
return m(o)
def astnode(self, s):
"""Return a Python3 ast Node compiled from a string."""
try:
import ast
except ImportError:
# Fallback to eval when ast package is not available,
# e.g. IronPython 1.0.
return eval(s)
p = ast.parse('__tempvalue__ = ' + s)
return p.body[0].value
def build_Subscript(self, o):
return self.build(o.value)[self.build(o.slice)]
def build_Index(self, o):
return self.build(o.value)
def build_Call(self, o):
"""Emulate ``build_Call`` under Python 3.5+."""
# Workaround for python 3.5. _ast.Call signature, docs found at
# https://greentreesnakes.readthedocs.org/en/latest/nodes.html
import ast
callee = self.build(o.func)
args = []
if o.args is not None:
for a in o.args:
if isinstance(a, ast.Starred):
args.append(self.build(a.value))
else:
args.append(self.build(a))
kwargs = {}
for kw in o.keywords:
if kw.arg is None: # double asterix `**`
rst = self.build(kw.value)
if not isinstance(rst, dict):
raise TypeError(
'Invalid argument for call.Must be a mapping object.',
)
# give preference to the keys set directly from arg=value
for k, v in rst.items():
if k not in kwargs:
kwargs[k] = v
else: # defined on the call as: arg=value
kwargs[kw.arg] = self.build(kw.value)
return callee(*args, **kwargs)
def build_List(self, o):
return list(map(self.build, o.elts))
def build_Str(self, o):
return o.s
def build_Num(self, o):
return o.n
def build_Dict(self, o):
return dict(
[(self.build(k), self.build(v)) for k, v in zip(o.keys, o.values)],
)
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
name = o.id
if name == 'None':
return None
if name == 'True':
return True
if name == 'False':
return False
# See if the Name is a package or module. If it is, import it.
try:
return modules(name)
except ImportError:
pass
# See if the Name is in builtins.
try:
return getattr(builtins, name)
except AttributeError:
pass
raise TypeError('unrepr could not resolve the name %s' % repr(name))
def build_Constant(self, o):
return o.value
def build_UnaryOp(self, o):
op, operand = map(self.build, [o.op, o.operand])
return op(operand)
def build_BinOp(self, o):
left, op, right = map(self.build, [o.left, o.op, o.right])
return op(left, right)
def build_Add(self, o):
return operator.add
def build_Mult(self, o):
return operator.mul
def build_USub(self, o):
return operator.neg
def build_Attribute(self, o):
parent = self.build(o.value)
return getattr(parent, o.attr)
def build_NoneType(self, o):
return None
def unrepr(s):
"""Return a Python object compiled from a string."""
if not s:
return s
b = _Builder()
obj = b.astnode(s)
return b.build(obj)
def modules(modulePath):
"""Load a module and retrieve a reference to that module."""
__import__(modulePath)
return sys.modules[modulePath]
def attributes(full_attribute_name):
"""Load a module and retrieve an attribute of that module."""
# Parse out the path, module, and attribute
last_dot = full_attribute_name.rfind('.')
attr_name = full_attribute_name[last_dot + 1 :]
mod_path = full_attribute_name[:last_dot]
mod = modules(mod_path)
# Let an AttributeError propagate outward.
try:
attr = getattr(mod, attr_name)
except AttributeError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (mod_path, attr_name),
)
# Return a reference to the attribute.
return attr
| _Builder |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 19364,
"end": 21301
} | class ____(GeneratedAirbyteSource):
class APIPassword:
@public
def __init__(self, api_password: str):
self.auth_method = "api_password"
self.api_password = check.str_param(api_password, "api_password")
class OAuth20:
@public
def __init__(
self,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
access_token: Optional[str] = None,
):
self.auth_method = "oauth2.0"
self.client_id = check.opt_str_param(client_id, "client_id")
self.client_secret = check.opt_str_param(client_secret, "client_secret")
self.access_token = check.opt_str_param(access_token, "access_token")
@public
def __init__(
self,
name: str,
shop: str,
credentials: Union["ShopifySource.APIPassword", "ShopifySource.OAuth20"],
start_date: str,
):
"""Airbyte Source for Shopify.
Documentation can be found at https://docs.airbyte.com/integrations/sources/shopify
Args:
name (str): The name of the destination.
shop (str): The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'.
credentials (Union[ShopifySource.APIPassword, ShopifySource.OAuth20]): The authorization method to use to retrieve data from Shopify
start_date (str): The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated.
"""
self.shop = check.str_param(shop, "shop")
self.credentials = check.inst_param(
credentials, "credentials", (ShopifySource.APIPassword, ShopifySource.OAuth20)
)
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Shopify", name)
| ShopifySource |
python | spack__spack | lib/spack/spack/test/installer_tui.py | {
"start": 10467,
"end": 12524
} | class ____:
"""Test time-based behaviors like spinner and cleanup"""
def test_spinner_updates(self):
"""Test that spinner advances over time"""
status, fake_time, _ = create_build_status()
add_mock_builds(status, 1)
# Initial spinner index
initial_index = status.spinner_index
# Advance time past spinner interval
fake_time[0] = inst.SPINNER_INTERVAL + 0.01
status.update()
# Spinner should have advanced
assert status.spinner_index == (initial_index + 1) % len(status.spinner_chars)
def test_finished_package_cleanup(self):
"""Test that finished packages are cleaned up after timeout"""
status, fake_time, _ = create_build_status()
(spec,) = add_mock_builds(status, 1)
build_id = spec.dag_hash()
# Mark as finished
fake_time[0] = 0.0
status.update_state(build_id, "finished")
# Build should still be in active builds
assert build_id in status.builds
assert len(status.finished_builds) == 0
# Advance time past cleanup timeout
fake_time[0] = inst.CLEANUP_TIMEOUT + 0.01
status.update()
# Build should now be moved to finished_builds and removed from active
assert build_id not in status.builds
# Note: finished_builds is cleared after rendering, so check it happened via side effects
assert status.dirty or build_id not in status.builds
def test_failed_packages_not_cleaned_up(self):
"""Test that failed packages stay in active builds"""
status, fake_time, _ = create_build_status()
(spec,) = add_mock_builds(status, 1)
build_id = spec.dag_hash()
# Mark as failed
fake_time[0] = 0.0
status.update_state(build_id, "failed")
# Advance time past cleanup timeout
fake_time[0] = inst.CLEANUP_TIMEOUT + 0.01
status.update()
# Failed build should remain in active builds
assert build_id in status.builds
| TestTimeBasedBehavior |
python | pennersr__django-allauth | allauth/headless/socialaccount/inputs.py | {
"start": 590,
"end": 646
} | class ____(SignupForm, inputs.Input):
pass
| SignupInput |
python | PyCQA__pylint | tests/functional/b/bad_reversed_sequence.py | {
"start": 591,
"end": 2105
} | class ____:
""" implements only __getitem__ """
def __getitem__(self, index):
return index
def uninferable(seq):
""" This can't be inferred at this moment,
make sure we don't have a false positive.
"""
return reversed(seq)
def test(path):
""" test function """
seq = reversed() # No argument given
seq = reversed(None) # [bad-reversed-sequence]
seq = reversed([1, 2, 3])
seq = reversed((1, 2, 3))
seq = reversed(set()) # [bad-reversed-sequence]
seq = reversed(iter([1, 2, 3])) # [bad-reversed-sequence]
seq = reversed(GoodReversed())
seq = reversed(SecondGoodReversed())
seq = reversed(BadReversed()) # [bad-reversed-sequence]
seq = reversed(SecondBadReversed()) # [bad-reversed-sequence]
seq = reversed(range(100))
seq = reversed(lambda: None) # [bad-reversed-sequence]
seq = reversed(deque([]))
seq = reversed("123")
seq = uninferable([1, 2, 3])
seq = reversed(path.split("/"))
return seq
def test_dict_ancestor_and_reversed():
"""Don't emit for subclasses of dict, with __reversed__ implemented."""
class Child(dict):
def __reversed__(self):
return reversed(range(10))
seq = reversed(OrderedDict())
return reversed(Child()), seq
def test_dont_emit_for_reversing_enums():
"""Don't emit when reversing enum classes"""
class Color(IntEnum):
RED = 1
GREEN = 2
BLUE = 3
for color in reversed(Color):
yield color
| SecondBadReversed |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1587740,
"end": 1588339
} | class ____(sgqlc.types.Union):
"""An item in an issue timeline"""
__schema__ = github_schema
__types__ = (
AssignedEvent,
ClosedEvent,
Commit,
CrossReferencedEvent,
DemilestonedEvent,
IssueComment,
LabeledEvent,
LockedEvent,
MilestonedEvent,
ReferencedEvent,
RenamedTitleEvent,
ReopenedEvent,
SubscribedEvent,
TransferredEvent,
UnassignedEvent,
UnlabeledEvent,
UnlockedEvent,
UnsubscribedEvent,
UserBlockedEvent,
)
| IssueTimelineItem |
python | doocs__leetcode | solution/2200-2299/2203.Minimum Weighted Subgraph With the Required Paths/Solution.py | {
"start": 0,
"end": 896
} | class ____:
def minimumWeight(
self, n: int, edges: List[List[int]], src1: int, src2: int, dest: int
) -> int:
def dijkstra(g, u):
dist = [inf] * n
dist[u] = 0
q = [(0, u)]
while q:
d, u = heappop(q)
if d > dist[u]:
continue
for v, w in g[u]:
if dist[v] > dist[u] + w:
dist[v] = dist[u] + w
heappush(q, (dist[v], v))
return dist
g = defaultdict(list)
rg = defaultdict(list)
for f, t, w in edges:
g[f].append((t, w))
rg[t].append((f, w))
d1 = dijkstra(g, src1)
d2 = dijkstra(g, src2)
d3 = dijkstra(rg, dest)
ans = min(sum(v) for v in zip(d1, d2, d3))
return -1 if ans >= inf else ans
| Solution |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 12818,
"end": 12891
} | class ____:
foo_value = attr.ib()
| HashCacheSerializationTestCachedSlots |
python | huggingface__transformers | src/transformers/models/roberta/modeling_roberta.py | {
"start": 47423,
"end": 50259
} | class ____(RobertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
r"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| RobertaForTokenClassification |
python | falconry__falcon | falcon/_typing.py | {
"start": 3522,
"end": 3705
} | class ____(Protocol):
def __call__(
self,
resource: Resource,
req: Request,
resp: Response,
**kwargs: Any,
) -> None: ...
| ResponderMethod |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/clipboard/in_memory.py | {
"start": 152,
"end": 1060
} | class ____(Clipboard):
"""
Default clipboard implementation.
Just keep the data in memory.
This implements a kill-ring, for Emacs mode.
"""
def __init__(self, data: ClipboardData | None = None, max_size: int = 60) -> None:
assert max_size >= 1
self.max_size = max_size
self._ring: deque[ClipboardData] = deque()
if data is not None:
self.set_data(data)
def set_data(self, data: ClipboardData) -> None:
self._ring.appendleft(data)
while len(self._ring) > self.max_size:
self._ring.pop()
def get_data(self) -> ClipboardData:
if self._ring:
return self._ring[0]
else:
return ClipboardData()
def rotate(self) -> None:
if self._ring:
# Add the very first item at the end.
self._ring.append(self._ring.popleft())
| InMemoryClipboard |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 5613,
"end": 6846
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yuv and yuv_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops_stack.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops_stack.stack(split1)
join2 = array_ops_stack.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
| RGBToYUVTest |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/embed/field.py | {
"start": 67,
"end": 162
} | class ____(TypedDict):
name: str
value: str
inline: bool
| DiscordMessageEmbedFieldDict |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 13658,
"end": 14055
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->FNet
| FNetOnlyNSPHead |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 216,
"end": 4644
} | class ____(SimpleTestCase):
    """TimeField parsing and widget round-trip behavior under the "nl" locale."""

    @classmethod
    def setUpClass(cls):
        # nl/formats.py has customized TIME_INPUT_FORMATS:
        # ['%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M']
        cls.enterClassContext(translation.override("nl"))
        super().setUpClass()

    def test_timeField(self):
        "TimeFields can parse dates in the default format"
        f = forms.TimeField()
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(ValidationError):
            f.clean("1:30:05 PM")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13:30:05")
        self.assertEqual(result, time(13, 30, 5))
        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid, but non-default format, get a parsed result
        result = f.clean("13:30")
        self.assertEqual(result, time(13, 30, 0))
        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")
        # ISO formats are accepted, even if not specified in formats.py
        result = f.clean("13:30:05.000155")
        self.assertEqual(result, time(13, 30, 5, 155))

    def test_localized_timeField(self):
        "Localized TimeFields act as unlocalized widgets"
        f = forms.TimeField(localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(ValidationError):
            f.clean("1:30:05 PM")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13:30:05")
        self.assertEqual(result, time(13, 30, 5))
        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13:30")
        self.assertEqual(result, time(13, 30, 0))
        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_timeField_with_inputformat(self):
        """
        TimeFields with manually specified input formats can accept those
        formats
        """
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(ValidationError):
            f.clean("1:30:05 PM")
        with self.assertRaises(ValidationError):
            f.clean("13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13.30.05")
        self.assertEqual(result, time(13, 30, 5))
        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13.30")
        self.assertEqual(result, time(13, 30, 0))
        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_localized_timeField_with_inputformat(self):
        """
        Localized TimeFields with manually specified input formats can accept
        those formats.
        """
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(ValidationError):
            f.clean("1:30:05 PM")
        with self.assertRaises(ValidationError):
            f.clean("13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13.30.05")
        self.assertEqual(result, time(13, 30, 5))
        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean("13.30")
        self.assertEqual(result, time(13, 30, 0))
        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
| LocalizedTimeTests |
python | doocs__leetcode | solution/2500-2599/2557.Maximum Number of Integers to Choose From a Range II/Solution.py | {
"start": 0,
"end": 617
} | class ____:
def maxCount(self, banned: List[int], n: int, maxSum: int) -> int:
banned.extend([0, n + 1])
ban = sorted(set(banned))
ans = 0
for i, j in pairwise(ban):
left, right = 0, j - i - 1
while left < right:
mid = (left + right + 1) >> 1
if (i + 1 + i + mid) * mid // 2 <= maxSum:
left = mid
else:
right = mid - 1
ans += left
maxSum -= (i + 1 + i + left) * left // 2
if maxSum <= 0:
break
return ans
| Solution |
python | doocs__leetcode | solution/0800-0899/0895.Maximum Frequency Stack/Solution.py | {
"start": 0,
"end": 493
} | class ____:
def __init__(self):
self.cnt = defaultdict(int)
self.q = []
self.ts = 0
def push(self, val: int) -> None:
self.ts += 1
self.cnt[val] += 1
heappush(self.q, (-self.cnt[val], -self.ts, val))
def pop(self) -> int:
val = heappop(self.q)[2]
self.cnt[val] -= 1
return val
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(val)
# param_2 = obj.pop()
| FreqStack |
python | plotly__plotly.py | plotly/graph_objs/layout/polar/_angularaxis.py | {
"start": 235,
"end": 66881
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.polar"
_path_str = "layout.polar.angularaxis"
_valid_props = {
"autotypenumbers",
"categoryarray",
"categoryarraysrc",
"categoryorder",
"color",
"direction",
"dtick",
"exponentformat",
"gridcolor",
"griddash",
"gridwidth",
"hoverformat",
"labelalias",
"layer",
"linecolor",
"linewidth",
"minexponent",
"minorloglabels",
"nticks",
"period",
"rotation",
"separatethousands",
"showexponent",
"showgrid",
"showline",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thetaunit",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"type",
"uirevision",
"visible",
}
@property
def autotypenumbers(self):
"""
Using "strict" a numeric string in trace data is not converted
to a number. Using *convert types* a numeric string in trace
data may be treated as a number during automatic axis `type`
detection. Defaults to layout.autotypenumbers.
The 'autotypenumbers' property is an enumeration that may be specified as:
- One of the following enumeration values:
['convert types', 'strict']
Returns
-------
Any
"""
return self["autotypenumbers"]
@autotypenumbers.setter
def autotypenumbers(self, val):
self["autotypenumbers"] = val
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean, geometric mean or median
of all the values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'geometric mean ascending',
'geometric mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def direction(self):
"""
Sets the direction corresponding to positive angles.
The 'direction' property is an enumeration that may be specified as:
- One of the following enumeration values:
['counterclockwise', 'clockwise']
Returns
-------
Any
"""
return self["direction"]
@direction.setter
def direction(self, val):
self["direction"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def layer(self):
"""
Sets the layer on which this axis is displayed. If *above
traces*, this axis is displayed above all the subplot's traces
If *below traces*, this axis is displayed below all the
subplot's traces, but above the grid lines. Useful when used
together with scatter-like traces with `cliponaxis` set to
False to show markers and/or text nodes above this axis.
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['above traces', 'below traces']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def minorloglabels(self):
"""
Determines how minor log labels are displayed. If *small
digits*, small digits i.e. 2 or 5 are displayed. If "complete",
complete digits are displayed. If "none", no labels are
displayed.
The 'minorloglabels' property is an enumeration that may be specified as:
- One of the following enumeration values:
['small digits', 'complete', 'none']
Returns
-------
Any
"""
return self["minorloglabels"]
@minorloglabels.setter
def minorloglabels(self, val):
self["minorloglabels"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def period(self):
"""
Set the angular period. Has an effect only when
`angularaxis.type` is "category".
The 'period' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["period"]
@period.setter
def period(self, val):
self["period"] = val
@property
def rotation(self):
"""
Sets that start position (in degrees) of the angular axis By
default, polar subplots with `direction` set to
"counterclockwise" get a `rotation` of 0 which corresponds to
due East (like what mathematicians prefer). In turn, polar with
`direction` set to "clockwise" get a rotation of 90 which
corresponds to due North (like on a compass),
The 'rotation' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["rotation"]
@rotation.setter
def rotation(self, val):
self["rotation"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thetaunit(self):
"""
Sets the format unit of the formatted "theta" values. Has an
effect only when `angularaxis.type` is "linear".
The 'thetaunit' property is an enumeration that may be specified as:
- One of the following enumeration values:
['radians', 'degrees']
Returns
-------
Any
"""
return self["thetaunit"]
@thetaunit.setter
def thetaunit(self, val):
self["thetaunit"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.angularaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.layout.polar.angularaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.polar.angularaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.layout.polar.angularaxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.layout.polar.angula
raxis.tickformatstopdefaults), sets the default property values
to use for elements of layout.polar.angularaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.angularaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.layout.polar.angularaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def type(self):
"""
Sets the angular axis type. If "linear", set `thetaunit` to
determine the unit in which axis value are shown. If *category,
use `period` to set the number of integer coordinates around
polar axis.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['-', 'linear', 'category']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def uirevision(self):
"""
Controls persistence of user-driven changes in axis `rotation`.
Defaults to `polar<N>.uirevision`.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
direction
Sets the direction corresponding to positive angles.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
minorloglabels
Determines how minor log labels are displayed. If
*small digits*, small digits i.e. 2 or 5 are displayed.
If "complete", complete digits are displayed. If
"none", no labels are displayed.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
period
Set the angular period. Has an effect only when
`angularaxis.type` is "category".
rotation
Sets that start position (in degrees) of the angular
axis By default, polar subplots with `direction` set to
"counterclockwise" get a `rotation` of 0 which
corresponds to due East (like what mathematicians
prefer). In turn, polar with `direction` set to
"clockwise" get a rotation of 90 which corresponds to
due North (like on a compass),
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thetaunit
Sets the format unit of the formatted "theta" values.
Has an effect only when `angularaxis.type` is "linear".
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.polar.an
gularaxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.layout.pola
r.angularaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.polar.angularaxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
type
Sets the angular axis type. If "linear", set
`thetaunit` to determine the unit in which axis value
are shown. If *category, use `period` to set the number
of integer coordinates around polar axis.
uirevision
Controls persistence of user-driven changes in axis
`rotation`. Defaults to `polar<N>.uirevision`.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
"""
def __init__(
self,
arg=None,
autotypenumbers=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
direction=None,
dtick=None,
exponentformat=None,
gridcolor=None,
griddash=None,
gridwidth=None,
hoverformat=None,
labelalias=None,
layer=None,
linecolor=None,
linewidth=None,
minexponent=None,
minorloglabels=None,
nticks=None,
period=None,
rotation=None,
separatethousands=None,
showexponent=None,
showgrid=None,
showline=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thetaunit=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
type=None,
uirevision=None,
visible=None,
**kwargs,
):
"""
Construct a new AngularAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.polar.AngularAxis`
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
direction
Sets the direction corresponding to positive angles.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
minorloglabels
Determines how minor log labels are displayed. If
*small digits*, small digits i.e. 2 or 5 are displayed.
If "complete", complete digits are displayed. If
"none", no labels are displayed.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
period
Set the angular period. Has an effect only when
`angularaxis.type` is "category".
rotation
Sets that start position (in degrees) of the angular
axis By default, polar subplots with `direction` set to
"counterclockwise" get a `rotation` of 0 which
corresponds to due East (like what mathematicians
prefer). In turn, polar with `direction` set to
"clockwise" get a rotation of 90 which corresponds to
due North (like on a compass),
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thetaunit
Sets the format unit of the formatted "theta" values.
Has an effect only when `angularaxis.type` is "linear".
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.polar.an
gularaxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.layout.pola
r.angularaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.polar.angularaxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
type
Sets the angular axis type. If "linear", set
`thetaunit` to determine the unit in which axis value
are shown. If *category, use `period` to set the number
of integer coordinates around polar axis.
uirevision
Controls persistence of user-driven changes in axis
`rotation`. Defaults to `polar<N>.uirevision`.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
Returns
-------
AngularAxis
"""
super().__init__("angularaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.polar.AngularAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.AngularAxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autotypenumbers", arg, autotypenumbers)
self._set_property("categoryarray", arg, categoryarray)
self._set_property("categoryarraysrc", arg, categoryarraysrc)
self._set_property("categoryorder", arg, categoryorder)
self._set_property("color", arg, color)
self._set_property("direction", arg, direction)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("hoverformat", arg, hoverformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("layer", arg, layer)
self._set_property("linecolor", arg, linecolor)
self._set_property("linewidth", arg, linewidth)
self._set_property("minexponent", arg, minexponent)
self._set_property("minorloglabels", arg, minorloglabels)
self._set_property("nticks", arg, nticks)
self._set_property("period", arg, period)
self._set_property("rotation", arg, rotation)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showgrid", arg, showgrid)
self._set_property("showline", arg, showline)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thetaunit", arg, thetaunit)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("type", arg, type)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| AngularAxis |
python | huggingface__transformers | tests/models/chinese_clip/test_modeling_chinese_clip.py | {
"start": 17120,
"end": 19518
} | class ____:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = ChineseCLIPTextModelTester(parent, **text_kwargs)
self.vision_model_tester = ChineseCLIPVisionModelTester(parent, **vision_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.is_training = is_training
def prepare_config_and_inputs(self):
(
config,
input_ids,
token_type_ids,
attention_mask,
_,
__,
___,
) = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, token_type_ids, attention_mask, pixel_values
def get_config(self):
return ChineseCLIPConfig(
text_config=self.text_model_tester.get_config().to_dict(),
vision_config=self.vision_model_tester.get_config().to_dict(),
projection_dim=64,
)
def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values):
model = ChineseCLIPModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, pixel_values, attention_mask, token_type_ids)
self.parent.assertEqual(
result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"return_loss": True,
}
return config, inputs_dict
@require_torch
| ChineseCLIPModelTester |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE790.py | {
"start": 1760,
"end": 2005
} | class ____:
@abc.abstractmethod
def func(self) -> str:
"""Docstring"""
...
def impl(self) -> str:
"""Docstring"""
return self.func()
def stub(self) -> str:
"""Docstring"""
...
| Repro |
python | graphql-python__graphene | graphene/types/tests/test_definition.py | {
"start": 1252,
"end": 9009
} | class ____(InputObjectType):
pass
def test_defines_a_query_only_schema():
blog_schema = Schema(Query)
assert blog_schema.query == Query
assert blog_schema.graphql_schema.query_type.graphene_type == Query
article_field = Query._meta.fields["article"]
assert article_field.type == Article
assert article_field.type._meta.name == "Article"
article_field_type = article_field.type
assert issubclass(article_field_type, ObjectType)
title_field = article_field_type._meta.fields["title"]
assert title_field.type == String
author_field = article_field_type._meta.fields["author"]
author_field_type = author_field.type
assert issubclass(author_field_type, ObjectType)
recent_article_field = author_field_type._meta.fields["recent_article"]
assert recent_article_field.type == Article
feed_field = Query._meta.fields["feed"]
assert feed_field.type.of_type == Article
def test_defines_a_mutation_schema():
blog_schema = Schema(Query, mutation=Mutation)
assert blog_schema.mutation == Mutation
assert blog_schema.graphql_schema.mutation_type.graphene_type == Mutation
write_mutation = Mutation._meta.fields["write_article"]
assert write_mutation.type == Article
assert write_mutation.type._meta.name == "Article"
def test_defines_a_subscription_schema():
blog_schema = Schema(Query, subscription=Subscription)
assert blog_schema.subscription == Subscription
assert blog_schema.graphql_schema.subscription_type.graphene_type == Subscription
subscription = Subscription._meta.fields["article_subscribe"]
assert subscription.type == Article
assert subscription.type._meta.name == "Article"
def test_includes_nested_input_objects_in_the_map():
class NestedInputObject(InputObjectType):
value = String()
class SomeInputObject(InputObjectType):
nested = InputField(NestedInputObject)
class SomeMutation(Mutation):
mutate_something = Field(Article, input=Argument(SomeInputObject))
class SomeSubscription(Mutation):
subscribe_to_something = Field(Article, input=Argument(SomeInputObject))
schema = Schema(query=Query, mutation=SomeMutation, subscription=SomeSubscription)
type_map = schema.graphql_schema.type_map
assert type_map["NestedInputObject"].graphene_type is NestedInputObject
def test_includes_interfaces_thunk_subtypes_in_the_type_map():
class SomeInterface(Interface):
f = Int()
class SomeSubtype(ObjectType):
class Meta:
interfaces = (SomeInterface,)
class Query(ObjectType):
iface = Field(lambda: SomeInterface)
schema = Schema(query=Query, types=[SomeSubtype])
type_map = schema.graphql_schema.type_map
assert type_map["SomeSubtype"].graphene_type is SomeSubtype
def test_includes_types_in_union():
class SomeType(ObjectType):
a = String()
class OtherType(ObjectType):
b = String()
class MyUnion(Union):
class Meta:
types = (SomeType, OtherType)
class Query(ObjectType):
union = Field(MyUnion)
schema = Schema(query=Query)
type_map = schema.graphql_schema.type_map
assert type_map["OtherType"].graphene_type is OtherType
assert type_map["SomeType"].graphene_type is SomeType
def test_maps_enum():
class SomeType(ObjectType):
a = String()
class OtherType(ObjectType):
b = String()
class MyUnion(Union):
class Meta:
types = (SomeType, OtherType)
class Query(ObjectType):
union = Field(MyUnion)
schema = Schema(query=Query)
type_map = schema.graphql_schema.type_map
assert type_map["OtherType"].graphene_type is OtherType
assert type_map["SomeType"].graphene_type is SomeType
def test_includes_interfaces_subtypes_in_the_type_map():
class SomeInterface(Interface):
f = Int()
class SomeSubtype(ObjectType):
class Meta:
interfaces = (SomeInterface,)
class Query(ObjectType):
iface = Field(SomeInterface)
schema = Schema(query=Query, types=[SomeSubtype])
type_map = schema.graphql_schema.type_map
assert type_map["SomeSubtype"].graphene_type is SomeSubtype
def test_stringifies_simple_types():
assert str(Int) == "Int"
assert str(Article) == "Article"
assert str(MyInterface) == "MyInterface"
assert str(MyUnion) == "MyUnion"
assert str(MyEnum) == "MyEnum"
assert str(MyInputObjectType) == "MyInputObjectType"
assert str(NonNull(Int)) == "Int!"
assert str(List(Int)) == "[Int]"
assert str(NonNull(List(Int))) == "[Int]!"
assert str(List(NonNull(Int))) == "[Int!]"
assert str(List(List(Int))) == "[[Int]]"
# def test_identifies_input_types():
# expected = (
# (GraphQLInt, True),
# (ObjectType, False),
# (InterfaceType, False),
# (UnionType, False),
# (EnumType, True),
# (InputObjectType, True)
# )
# for type_, answer in expected:
# assert is_input_type(type_) == answer
# assert is_input_type(GraphQLList(type_)) == answer
# assert is_input_type(GraphQLNonNull(type_)) == answer
# def test_identifies_output_types():
# expected = (
# (GraphQLInt, True),
# (ObjectType, True),
# (InterfaceType, True),
# (UnionType, True),
# (EnumType, True),
# (InputObjectType, False)
# )
# for type, answer in expected:
# assert is_output_type(type) == answer
# assert is_output_type(GraphQLList(type)) == answer
# assert is_output_type(GraphQLNonNull(type)) == answer
# def test_prohibits_nesting_nonnull_inside_nonnull():
# with raises(Exception) as excinfo:
# GraphQLNonNull(GraphQLNonNull(GraphQLInt))
# assert 'Can only create NonNull of a Nullable GraphQLType but got: Int!.' in str(excinfo.value)
# def test_prohibits_putting_non_object_types_in_unions():
# bad_union_types = [
# GraphQLInt,
# GraphQLNonNull(GraphQLInt),
# GraphQLList(GraphQLInt),
# InterfaceType,
# UnionType,
# EnumType,
# InputObjectType
# ]
# for x in bad_union_types:
# with raises(Exception) as excinfo:
# GraphQLSchema(
# GraphQLObjectType(
# 'Root',
# fields={
# 'union': GraphQLField(GraphQLUnionType('BadUnion', [x]))
# }
# )
# )
# assert 'BadUnion may only contain Object types, it cannot contain: ' + str(x) + '.' \
# == str(excinfo.value)
def test_does_not_mutate_passed_field_definitions():
class CommonFields:
field1 = String()
field2 = String(id=String())
class TestObject1(CommonFields, ObjectType):
pass
class TestObject2(CommonFields, ObjectType):
pass
assert TestObject1._meta.fields == TestObject2._meta.fields
class CommonFields:
field1 = String()
field2 = String()
class TestInputObject1(CommonFields, InputObjectType):
pass
class TestInputObject2(CommonFields, InputObjectType):
pass
assert TestInputObject1._meta.fields == TestInputObject2._meta.fields
def test_graphene_graphql_type_can_be_copied():
class Query(ObjectType):
field = String()
def resolve_field(self, info):
return ""
schema = Schema(query=Query)
query_type_copy = copy.copy(schema.graphql_schema.query_type)
assert query_type_copy.__dict__ == schema.graphql_schema.query_type.__dict__
assert isinstance(schema.graphql_schema.query_type, GrapheneGraphQLType)
| MyInputObjectType |
python | django__django | django/core/files/storage/filesystem.py | {
"start": 585,
"end": 8646
} | class ____(Storage, StorageSettingsMixin):
"""
Standard filesystem storage
"""
def __init__(
self,
location=None,
base_url=None,
file_permissions_mode=None,
directory_permissions_mode=None,
allow_overwrite=False,
):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
self._allow_overwrite = allow_overwrite
setting_changed.connect(self._clear_cached_properties)
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return os.path.abspath(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith("/"):
self._base_url += "/"
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(
self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS
)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(
self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS
)
def _open(self, name, mode="rb"):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
directory = os.path.dirname(full_path)
try:
if self.directory_permissions_mode is not None:
# Set the umask because os.makedirs() doesn't apply the "mode"
# argument to intermediate-level directories.
old_umask = os.umask(0o777 & ~self.directory_permissions_mode)
try:
os.makedirs(
directory, self.directory_permissions_mode, exist_ok=True
)
finally:
os.umask(old_umask)
else:
os.makedirs(directory, exist_ok=True)
except FileExistsError:
raise FileExistsError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, "temporary_file_path"):
file_move_safe(
content.temporary_file_path(),
full_path,
allow_overwrite=self._allow_overwrite,
)
# This is a normal uploadedfile that we can stream.
else:
# The combination of O_CREAT and O_EXCL makes os.open()
# raises an OSError if the file already exists before it's
# opened.
open_flags = (
os.O_WRONLY
| os.O_CREAT
| os.O_EXCL
| getattr(os, "O_BINARY", 0)
)
if self._allow_overwrite:
open_flags = open_flags & ~os.O_EXCL | os.O_TRUNC
fd = os.open(full_path, open_flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = "wb" if isinstance(chunk, bytes) else "wt"
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except FileExistsError:
# A new name is needed if the file exists.
name = self.get_available_name(name)
full_path = self.path(name)
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Ensure the saved path is always relative to the storage root.
name = os.path.relpath(full_path, self.location)
# Ensure the moved file has the same gid as the storage root.
self._ensure_location_group_id(full_path)
# Store filenames with forward slashes, even on Windows.
return str(name).replace("\\", "/")
def _ensure_location_group_id(self, full_path):
if os.name == "posix":
file_gid = os.stat(full_path).st_gid
location_gid = os.stat(self.location).st_gid
if file_gid != location_gid:
try:
os.chown(full_path, uid=-1, gid=location_gid)
except PermissionError:
pass
def delete(self, name):
if not name:
raise ValueError("The name must be given to delete().")
name = self.path(name)
# If the file or directory exists, delete it from the filesystem.
try:
if os.path.isdir(name):
os.rmdir(name)
else:
os.remove(name)
except FileNotFoundError:
# FileNotFoundError is raised if the file or directory was removed
# concurrently.
pass
def is_name_available(self, name, max_length=None):
if self._allow_overwrite:
return not (max_length and len(name) > max_length)
return super().is_name_available(name, max_length=max_length)
def get_alternative_name(self, file_root, file_ext):
if self._allow_overwrite:
return f"{file_root}{file_ext}"
return super().get_alternative_name(file_root, file_ext)
def exists(self, name):
return os.path.lexists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
with os.scandir(path) as entries:
for entry in entries:
if entry.is_dir():
directories.append(entry.name)
else:
files.append(entry.name)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip("/")
return urljoin(self.base_url, url)
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
tz = UTC if settings.USE_TZ else None
return datetime.fromtimestamp(ts, tz=tz)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
| FileSystemStorage |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 5435,
"end": 5508
} | class ____(NamedTuple):
other: int
@dataclass(frozen=True)
| Concrete16_1 |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 8569,
"end": 9186
} | class ____(BaseDeclaration):
"""Specific BaseDeclaration to use for 'sequenced' fields.
These fields are typically used to generate increasing unique values.
Attributes:
function (function): A function, expecting the current sequence counter
and returning the computed value.
"""
def __init__(self, function):
super().__init__()
self.function = function
def evaluate(self, instance, step, extra):
logger.debug("Sequence: Computing next value of %r for seq=%s", self.function, step.sequence)
return self.function(int(step.sequence))
| Sequence |
python | kamyu104__LeetCode-Solutions | Python/merge-in-between-linked-lists.py | {
"start": 70,
"end": 151
} | class ____(object):
def __init__(self, val=0, next=None):
pass
| ListNode |
python | streamlit__streamlit | lib/streamlit/runtime/caching/storage/cache_storage_protocol.py | {
"start": 2959,
"end": 4220
} | class ____:
"""Context passed to the cache storage during initialization
This is the normalized parameters that are passed to CacheStorageManager.create()
method.
Parameters
----------
function_key: str
A hash computed based on function name and source code decorated
by `@st.cache_data`
function_display_name: str
The display name of the function that is decorated by `@st.cache_data`
ttl_seconds : float or None
The time-to-live for the keys in storage, in seconds. If None, the entry
will never expire.
max_entries : int or None
The maximum number of entries to store in the cache storage.
If None, the cache storage will not limit the number of entries.
persist : Literal["disk"] or None
The persistence mode for the cache storage.
Legacy parameter, that used in Streamlit current cache storage implementation.
Could be ignored by cache storage implementation, if storage does not support
persistence or it persistent by default.
"""
function_key: str
function_display_name: str
ttl_seconds: float | None = None
max_entries: int | None = None
persist: Literal["disk"] | None = None
| CacheStorageContext |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/test_implies.py | {
"start": 94,
"end": 2892
} | class ____(unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, object, types, message=None):
self.assertTrue(isinstance(object, types),
message or '%r is not an instance of %r'%(object, types))
def testBasicImplies(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-relimport')
# First check that 'getopt' isn't accidentally in the graph:
mg = modulegraph.ModuleGraph(path=[root]+sys.path)
mg.add_script(os.path.join(root, 'script.py'))
node = mg.find_node('mod')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mg.find_node('getopt')
self.assertEqual(node, None)
# Now check that adding an implied dependency actually adds
# 'getopt' to the graph:
mg = modulegraph.ModuleGraph(path=[root]+sys.path, implies={
'mod': ['getopt']})
self.assertEqual(node, None)
mg.add_script(os.path.join(root, 'script.py'))
node = mg.find_node('mod')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mg.find_node('getopt')
self.assertIsInstance(node, modulegraph.SourceModule)
# Check that the edges are correct:
self.assertIn(mg.find_node('mod'), mg.get_edges(node)[1])
self.assertIn(node, mg.get_edges(mg.find_node('mod'))[0])
def testPackagedImplies(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-relimport')
# First check that 'getopt' isn't accidentally in the graph:
mg = modulegraph.ModuleGraph(path=[root]+sys.path)
mg.add_script(os.path.join(root, 'script.py'))
node = mg.find_node('mod')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mg.find_node('getopt')
self.assertEqual(node, None)
# Now check that adding an implied dependency actually adds
# 'getopt' to the graph:
mg = modulegraph.ModuleGraph(path=[root]+sys.path, implies={
'pkg.relative': ['getopt']})
node = mg.find_node('getopt')
self.assertEqual(node, None)
mg.add_script(os.path.join(root, 'script.py'))
node = mg.find_node('pkg.relative')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mg.find_node('getopt')
self.assertIsInstance(node, modulegraph.SourceModule)
# Check that the edges are correct:
self.assertIn(mg.find_node('pkg.relative'), mg.get_edges(node)[1])
self.assertIn(node, mg.get_edges(mg.find_node('pkg.relative'))[0])
if __name__ == '__main__':
unittest.main()
| ImpliesTestCase |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_month.py | {
"start": 20382,
"end": 23270
} | class ____:
def test_day_of_month(self):
dt = datetime(2007, 1, 1)
offset = MonthEnd()
result = dt + offset
assert result == Timestamp(2007, 1, 31)
result = result + offset
assert result == Timestamp(2007, 2, 28)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + MonthEnd(normalize=True)
expected = dt.replace(hour=0) + MonthEnd()
assert result == expected
offset_cases = []
offset_cases.append(
(
MonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
MonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
},
)
)
offset_cases.append(
(
MonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
MonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| TestMonthEnd |
python | wandb__wandb | wandb/vendor/pygments/lexers/javascript.py | {
"start": 49395,
"end": 57559
} | class ____(RegexLexer):
"""
For `Earl-Grey`_ source code.
.. _Earl-Grey: https://breuleux.github.io/earl-grey/
.. versionadded: 2.1
"""
name = 'Earl Grey'
aliases = ['earl-grey', 'earlgrey', 'eg']
filenames = ['*.eg']
mimetypes = ['text/x-earl-grey']
tokens = {
'root': [
(r'\n', Text),
include('control'),
(r'[^\S\n]+', Text),
(r';;.*\n', Comment),
(r'[\[\]{}:(),;]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
include('errors'),
(words((
'with', 'where', 'when', 'and', 'not', 'or', 'in',
'as', 'of', 'is'),
prefix=r'(?<=\s|\[)', suffix=r'(?![\w$\-])'),
Operator.Word),
(r'[*@]?->', Name.Function),
(r'[+\-*/~^<>%&|?!@#.]*=', Operator.Word),
(r'\.{2,3}', Operator.Word), # Range Operator
(r'([+*/~^<>&|?!]+)|([#\-](?=\s))|@@+(?=\s)|=+', Operator),
(r'(?<![\w$\-])(var|let)(?:[^\w$])', Keyword.Declaration),
include('keywords'),
include('builtins'),
include('assignment'),
(r'''(?x)
(?:()([a-zA-Z$_](?:[\w$\-]*[\w$])?)|
(?<=[\s{\[(])(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?))
(?=.*%)''',
bygroups(Punctuation, Name.Tag, Punctuation, Name.Class.Start), 'dbs'),
(r'[rR]?`', String.Backtick, 'bt'),
(r'[rR]?```', String.Backtick, 'tbt'),
(r'(?<=[\s\[{(,;])\.([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
r'(?=[\s\]}),;])', String.Symbol),
include('nested'),
(r'(?:[rR]|[rR]\.[gmi]{1,3})?"', String, combined('stringescape', 'dqs')),
(r'(?:[rR]|[rR]\.[gmi]{1,3})?\'', String, combined('stringescape', 'sqs')),
(r'"""', String, combined('stringescape', 'tdqs')),
include('tuple'),
include('import_paths'),
include('name'),
include('numbers'),
],
'dbs': [
(r'(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?)(?=[.\[\s])',
bygroups(Punctuation, Name.Class.DBS)),
(r'(\[)([\^#][a-zA-Z$_](?:[\w$\-]*[\w$])?)(\])',
bygroups(Punctuation, Name.Entity.DBS, Punctuation)),
(r'\s+', Text),
(r'%', Operator.DBS, '#pop'),
],
'import_paths': [
(r'(?<=[\s:;,])(\.{1,3}(?:[\w\-]*/)*)(\w(?:[\w\-]*\w)*)(?=[\s;,])',
bygroups(Text.Whitespace, Text)),
],
'assignment': [
(r'(\.)?([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
r'(?=\s+[+\-*/~^<>%&|?!@#.]*\=\s)',
bygroups(Punctuation, Name.Variable))
],
'errors': [
(words(('Error', 'TypeError', 'ReferenceError'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Name.Exception),
(r'''(?x)
(?<![\w$])
E\.[\w$](?:[\w$\-]*[\w$])?
(?:\.[\w$](?:[\w$\-]*[\w$])?)*
(?=[({\[?!\s])''',
Name.Exception),
],
'control': [
(r'''(?x)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?!\n)\s+
(?!and|as|each\*|each|in|is|mod|of|or|when|where|with)
(?=(?:[+\-*/~^<>%&|?!@#.])?[a-zA-Z$_](?:[\w$-]*[\w$])?)''',
Keyword.Control),
(r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(?!\n)\s+(?=[\'"\d{\[(])',
Keyword.Control),
(r'''(?x)
(?:
(?<=[%=])|
(?<=[=\-]>)|
(?<=with|each|with)|
(?<=each\*|where)
)(\s+)
([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
bygroups(Text, Keyword.Control, Punctuation)),
(r'''(?x)
(?<![+\-*/~^<>%&|?!@#.])(\s+)
([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
bygroups(Text, Keyword.Control, Punctuation)),
],
'nested': [
(r'''(?x)
(?<=[\w$\]})])(\.)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=\s+with(?:\s|\n))''',
bygroups(Punctuation, Name.Function)),
(r'''(?x)
(?<!\s)(\.)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=[}\]).,;:\s])''',
bygroups(Punctuation, Name.Field)),
(r'''(?x)
(?<=[\w$\]})])(\.)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=[\[{(:])''',
bygroups(Punctuation, Name.Function)),
],
'keywords': [
(words((
'each', 'each*', 'mod', 'await', 'break', 'chain',
'continue', 'elif', 'expr-value', 'if', 'match',
'return', 'yield', 'pass', 'else', 'require', 'var',
'let', 'async', 'method', 'gen'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Keyword.Pseudo),
(words(('this', 'self', '@'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
Keyword.Constant),
(words((
'Function', 'Object', 'Array', 'String', 'Number',
'Boolean', 'ErrorFactory', 'ENode', 'Promise'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
Keyword.Type),
],
'builtins': [
(words((
'send', 'object', 'keys', 'items', 'enumerate', 'zip',
'product', 'neighbours', 'predicate', 'equal',
'nequal', 'contains', 'repr', 'clone', 'range',
'getChecker', 'get-checker', 'getProperty', 'get-property',
'getProjector', 'get-projector', 'consume', 'take',
'promisify', 'spawn', 'constructor'),
prefix=r'(?<![\w\-#.])', suffix=r'(?![\w\-.])'),
Name.Builtin),
(words((
'true', 'false', 'null', 'undefined'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Name.Constant),
],
'name': [
(r'@([a-zA-Z$_](?:[\w$-]*[\w$])?)', Name.Variable.Instance),
(r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(\+\+|\-\-)?',
bygroups(Name.Symbol, Operator.Word))
],
'tuple': [
(r'#[a-zA-Z_][\w\-]*(?=[\s{(,;])', Name.Namespace)
],
'interpoling_string': [
(r'\}', String.Interpol, '#pop'),
include('root')
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'[^\\\'"]', String),
(r'[\'"\\]', String),
(r'\n', String) # All strings are multiline in EG
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape),
(r'\{', String.Interpol, 'interpoling_string'),
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
],
'bt': [
(r'`', String.Backtick, '#pop'),
(r'(?<!`)\n', String.Backtick),
(r'\^=?', String.Escape),
(r'.+', String.Backtick),
],
'tbt': [
(r'```', String.Backtick, '#pop'),
(r'\n', String.Backtick),
(r'\^=?', String.Escape),
(r'[^`]+', String.Backtick),
],
'numbers': [
(r'\d+\.(?!\.)\d*([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'8r[0-7]+', Number.Oct),
(r'2r[01]+', Number.Bin),
(r'16r[a-fA-F0-9]+', Number.Hex),
(r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?', Number.Radix),
(r'\d+', Number.Integer)
],
}
| EarlGreyLexer |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/shell_tool.py | {
"start": 3353,
"end": 10488
} | class ____:
"""Persistent shell session that supports sequential command execution."""
def __init__(
self,
workspace: Path,
policy: BaseExecutionPolicy,
command: tuple[str, ...],
environment: Mapping[str, str],
) -> None:
self._workspace = workspace
self._policy = policy
self._command = command
self._environment = dict(environment)
self._process: subprocess.Popen[str] | None = None
self._stdin: Any = None
self._queue: queue.Queue[tuple[str, str | None]] = queue.Queue()
self._lock = threading.Lock()
self._stdout_thread: threading.Thread | None = None
self._stderr_thread: threading.Thread | None = None
self._terminated = False
def start(self) -> None:
"""Start the shell subprocess and reader threads."""
if self._process and self._process.poll() is None:
return
self._process = self._policy.spawn(
workspace=self._workspace,
env=self._environment,
command=self._command,
)
if (
self._process.stdin is None
or self._process.stdout is None
or self._process.stderr is None
):
msg = "Failed to initialize shell session pipes."
raise RuntimeError(msg)
self._stdin = self._process.stdin
self._terminated = False
self._queue = queue.Queue()
self._stdout_thread = threading.Thread(
target=self._enqueue_stream,
args=(self._process.stdout, "stdout"),
daemon=True,
)
self._stderr_thread = threading.Thread(
target=self._enqueue_stream,
args=(self._process.stderr, "stderr"),
daemon=True,
)
self._stdout_thread.start()
self._stderr_thread.start()
def restart(self) -> None:
"""Restart the shell process."""
self.stop(self._policy.termination_timeout)
self.start()
def stop(self, timeout: float) -> None:
"""Stop the shell subprocess."""
if not self._process:
return
if self._process.poll() is None and not self._terminated:
try:
self._stdin.write("exit\n")
self._stdin.flush()
except (BrokenPipeError, OSError):
LOGGER.debug(
"Failed to write exit command; terminating shell session.",
exc_info=True,
)
try:
if self._process.wait(timeout=timeout) is None:
self._kill_process()
except subprocess.TimeoutExpired:
self._kill_process()
finally:
self._terminated = True
with contextlib.suppress(Exception):
self._stdin.close()
self._process = None
def execute(self, command: str, *, timeout: float) -> CommandExecutionResult:
"""Execute a command in the persistent shell."""
if not self._process or self._process.poll() is not None:
msg = "Shell session is not running."
raise RuntimeError(msg)
marker = f"{_DONE_MARKER_PREFIX}{uuid.uuid4().hex}"
deadline = time.monotonic() + timeout
with self._lock:
self._drain_queue()
payload = command if command.endswith("\n") else f"{command}\n"
self._stdin.write(payload)
self._stdin.write(f"printf '{marker} %s\\n' $?\n")
self._stdin.flush()
return self._collect_output(marker, deadline, timeout)
def _collect_output(
self,
marker: str,
deadline: float,
timeout: float,
) -> CommandExecutionResult:
collected: list[str] = []
total_lines = 0
total_bytes = 0
truncated_by_lines = False
truncated_by_bytes = False
exit_code: int | None = None
timed_out = False
while True:
remaining = deadline - time.monotonic()
if remaining <= 0:
timed_out = True
break
try:
source, data = self._queue.get(timeout=remaining)
except queue.Empty:
timed_out = True
break
if data is None:
continue
if source == "stdout" and data.startswith(marker):
_, _, status = data.partition(" ")
exit_code = self._safe_int(status.strip())
break
total_lines += 1
encoded = data.encode("utf-8", "replace")
total_bytes += len(encoded)
if total_lines > self._policy.max_output_lines:
truncated_by_lines = True
continue
if (
self._policy.max_output_bytes is not None
and total_bytes > self._policy.max_output_bytes
):
truncated_by_bytes = True
continue
if source == "stderr":
stripped = data.rstrip("\n")
collected.append(f"[stderr] {stripped}")
if data.endswith("\n"):
collected.append("\n")
else:
collected.append(data)
if timed_out:
LOGGER.warning(
"Command timed out after %.2f seconds; restarting shell session.",
timeout,
)
self.restart()
return CommandExecutionResult(
output="",
exit_code=None,
timed_out=True,
truncated_by_lines=truncated_by_lines,
truncated_by_bytes=truncated_by_bytes,
total_lines=total_lines,
total_bytes=total_bytes,
)
output = "".join(collected)
return CommandExecutionResult(
output=output,
exit_code=exit_code,
timed_out=False,
truncated_by_lines=truncated_by_lines,
truncated_by_bytes=truncated_by_bytes,
total_lines=total_lines,
total_bytes=total_bytes,
)
def _kill_process(self) -> None:
if not self._process:
return
if hasattr(os, "killpg"):
with contextlib.suppress(ProcessLookupError):
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
else: # pragma: no cover
with contextlib.suppress(ProcessLookupError):
self._process.kill()
def _enqueue_stream(self, stream: Any, label: str) -> None:
for line in iter(stream.readline, ""):
self._queue.put((label, line))
self._queue.put((label, None))
def _drain_queue(self) -> None:
while True:
try:
self._queue.get_nowait()
except queue.Empty:
break
@staticmethod
def _safe_int(value: str) -> int | None:
with contextlib.suppress(ValueError):
return int(value)
return None
| ShellSession |
python | apache__airflow | providers/cloudant/tests/unit/cloudant/hooks/test_cloudant.py | {
"start": 1062,
"end": 2862
} | class ____:
def setup_method(self):
self.cloudant_hook = CloudantHook()
@patch(
"airflow.providers.cloudant.hooks.cloudant.CloudantHook.get_connection",
return_value=Connection(login="the_user", password="the_password", host="the_account"),
)
@patch("airflow.providers.cloudant.hooks.cloudant.CouchDbSessionAuthenticator")
@patch("airflow.providers.cloudant.hooks.cloudant.CloudantV1")
def test_get_conn_passes_expected_params_and_returns_cloudant_object(
self, mock_cloudant_v1, mock_session_authenticator, mock_get_connection
):
cloudant_session = self.cloudant_hook.get_conn()
conn = mock_get_connection.return_value
mock_session_authenticator.assert_called_once_with(username=conn.login, password=conn.password)
mock_cloudant_v1.assert_called_once_with(authenticator=mock_session_authenticator.return_value)
cloudant_service = mock_cloudant_v1.return_value
cloudant_service.set_service_url.assert_called_once_with(f"https://{conn.host}.cloudant.com")
assert cloudant_session == cloudant_service
@pytest.mark.parametrize(
"conn",
[
Connection(),
Connection(host="acct"),
Connection(login="user"),
Connection(password="pwd"),
Connection(host="acct", login="user"),
Connection(host="acct", password="pwd"),
Connection(login="user", password="pwd"),
],
)
@patch("airflow.providers.cloudant.hooks.cloudant.CloudantHook.get_connection")
def test_get_conn_invalid_connection(self, mock_get_connection, conn):
mock_get_connection.return_value = conn
with pytest.raises(AirflowException):
self.cloudant_hook.get_conn()
| TestCloudantHook |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_parameterized_distribution_ks_test_p_value.py | {
"start": 489,
"end": 1537
} | class ____(ColumnAggregateMetricProvider):
"""MetricProvider Class for Aggregate Standard Deviation metric"""
metric_name = "column.parameterized_distribution_ks_test_p_value"
value_keys = ("distribution", "p_value", "params")
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, distribution, p_value=0.05, params=None, **kwargs):
if p_value <= 0 or p_value >= 1:
raise ValueError("p_value must be between 0 and 1 exclusive") # noqa: TRY003 # FIXME CoP
validate_distribution_parameters(distribution=distribution, params=params)
# Format arguments for scipy.kstest
if isinstance(params, dict):
positional_parameters = _scipy_distribution_positional_args_from_dict(
distribution, params
)
else:
positional_parameters = params
# K-S Test
ks_result = stats.kstest(column, distribution, args=positional_parameters)
return ks_result
| ColumnParameterizedDistributionKSTestPValue |
python | hynek__structlog | src/structlog/twisted.py | {
"start": 1639,
"end": 3564
} | class ____:
"""
Build a Twisted logger when an *instance* is called.
>>> from structlog import configure
>>> from structlog.twisted import LoggerFactory
>>> configure(logger_factory=LoggerFactory())
"""
def __call__(self, *args: Any) -> WrappedLogger:
"""
Positional arguments are silently ignored.
:rvalue: A new Twisted logger.
.. versionchanged:: 0.4.0
Added support for optional positional arguments.
"""
return log
_FAIL_TYPES = (BaseException, Failure)
def _extractStuffAndWhy(eventDict: EventDict) -> tuple[Any, Any, EventDict]:
"""
Removes all possible *_why*s and *_stuff*s, analyzes exc_info and returns
a tuple of ``(_stuff, _why, eventDict)``.
**Modifies** *eventDict*!
"""
_stuff = eventDict.pop("_stuff", None)
_why = eventDict.pop("_why", None)
event = eventDict.pop("event", None)
if isinstance(_stuff, _FAIL_TYPES) and isinstance(event, _FAIL_TYPES):
raise ValueError("Both _stuff and event contain an Exception/Failure.")
# `log.err('event', _why='alsoEvent')` is ambiguous.
if _why and isinstance(event, str):
raise ValueError("Both `_why` and `event` supplied.")
# Two failures are ambiguous too.
if not isinstance(_stuff, _FAIL_TYPES) and isinstance(event, _FAIL_TYPES):
_why = _why or "error"
_stuff = event
if isinstance(event, str):
_why = event
if not _stuff and sys.exc_info() != (None, None, None):
_stuff = Failure() # type: ignore[no-untyped-call]
# Either we used the error ourselves or the user supplied one for
# formatting. Avoid log.err() to dump another traceback into the log.
if isinstance(_stuff, BaseException) and not isinstance(_stuff, Failure):
_stuff = Failure(_stuff) # type: ignore[no-untyped-call]
return _stuff, _why, eventDict
| LoggerFactory |
python | getsentry__sentry | src/sentry/analytics/events/auth_v2.py | {
"start": 204,
"end": 344
} | class ____(analytics.Event):
event: str
analytics.register(AuthV2CsrfTokenRotated)
analytics.register(AuthV2DeleteLogin)
| AuthV2DeleteLogin |
python | plotly__plotly.py | plotly/graph_objs/box/_stream.py | {
"start": 233,
"end": 3479
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "box"
_path_str = "box.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.box.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.box.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | getsentry__sentry | tests/sentry/issue_detection/test_consecutive_db_detector.py | {
"start": 722,
"end": 11869
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._settings = get_detection_settings()
def find_problems(self, event: dict[str, Any]) -> list[PerformanceProblem]:
detector = ConsecutiveDBSpanDetector(self._settings, event)
run_detector_on_data(detector, event)
return list(detector.stored_problems.values())
def create_issue_event(self, span_duration: int = 50) -> dict[str, Any]:
spans = [
create_span(
"db",
span_duration,
"SELECT `customer`.`id` FROM `customers` WHERE `customer`.`name` = $1",
),
create_span(
"db",
span_duration,
"SELECT `order`.`id` FROM `books_author` WHERE `author`.`type` = $1",
),
create_span("db", 900, "SELECT COUNT(*) FROM `products`"),
]
spans = [
modify_span_start(span, span_duration * spans.index(span)) for span in spans
] # ensure spans don't overlap
return create_event(spans)
def test_detects_consecutive_db_spans(self) -> None:
span_duration = 1 * SECOND
spans = [
create_span("db", span_duration, "SELECT `customer`.`id` FROM `customers`"),
create_span("db", span_duration, "SELECT `order`.`id` FROM `books_author`"),
create_span("db", span_duration, "SELECT `product`.`id` FROM `products`"),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
event = create_event(spans)
problems = self.find_problems(event)
assert problems == [
PerformanceProblem(
fingerprint="1-1007-e6a9fc04320a924f46c7c737432bb0389d9dd095",
op="db",
desc="SELECT `order`.`id` FROM `books_author`",
type=PerformanceConsecutiveDBQueriesGroupType,
parent_span_ids=None,
cause_span_ids=["bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb"],
offender_span_ids=["bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb"],
evidence_data={
"op": "db",
"parent_span_ids": None,
"cause_span_ids": ["bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb"],
"offender_span_ids": ["bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb"],
},
evidence_display=[],
)
]
def test_does_not_detect_consecutive_db_spans_with_truncated_query(self) -> None:
span_duration = 10
spans = [
create_span("db", span_duration, "SELECT `customer`.`id` FROM `customers`"),
create_span(
"db",
span_duration,
"SELECT `order`.`id` FROM `books_author` WHERE `order`.`name` = %s",
),
create_span("db", span_duration, "SELECT `product`.`id` FROM `products` ..."),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
event = create_event(spans)
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_consecutive_db_spans_with_where(self) -> None:
span_duration = 5
spans = [
create_span("db", span_duration, "SELECT `customer`.`id` FROM `customers`"),
create_span(
"db",
span_duration,
"SELECT `order`.`id` FROM `books_author` WHERE `books_author`.`id` = %s",
),
create_span(
"db",
span_duration,
"SELECT `product`.`id` FROM `products` WHERE `product`.`name` = %s",
),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
event = create_event(spans)
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_consecutive_db_spans_with_fast_spans(self) -> None:
span_duration = 1
spans = [
create_span("db", span_duration, "SELECT `customer`.`id` FROM `customers`"),
create_span("db", span_duration, "SELECT `order`.`id` FROM `books_author`"),
create_span("db", span_duration, "SELECT `product`.`id` FROM `products`"),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
event = create_event(spans)
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_consecutive_db_spans_with_parameterized_query(self) -> None:
span_duration = 750
spans = [
create_span(
"db",
span_duration,
"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND b.another_id = 'another_id_123' ORDER BY b.created_at DESC LIMIT 3",
),
create_span(
"db",
span_duration,
"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND b.another_id = 'another_id_456' ORDER BY b.created_at DESC LIMIT 3",
),
create_span(
"db",
span_duration,
"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND b.another_id = 'another_id_789' ORDER BY b.created_at DESC LIMIT 3",
),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
event = create_event(spans)
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_consecutive_db_in_query_waterfall_event(self) -> None:
event = get_event("n-plus-one-db/query-waterfall-in-django-random-view")
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_consecutive_db_with_low_time_saving(self) -> None:
event = self.create_issue_event(10)
assert self.find_problems(event) == []
def test_detects_consecutive_db_with_high_time_saving(self) -> None:
event = self.create_issue_event()
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1007-3bc15c8aae3e4124dd409035f32ea2fd6835efc9",
op="db",
desc="SELECT COUNT(*) FROM `products`",
type=PerformanceConsecutiveDBQueriesGroupType,
parent_span_ids=None,
cause_span_ids=["bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb"],
offender_span_ids=["bbbbbbbbbbbbbbbb"],
evidence_data={
"op": "db",
"parent_span_ids": None,
"cause_span_ids": ["bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbb"],
"offender_span_ids": ["bbbbbbbbbbbbbbbb"],
},
evidence_display=[],
)
]
def test_fingerprint_of_autogroups_match(self) -> None:
span_duration = 50
spans_1 = [
create_span(
"db",
span_duration,
"SELECT `customer`.`id` FROM `customers` WHERE `customer`.`name` = $1",
),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 900, "SELECT COUNT(*) FROM `products`"),
]
spans_1 = [
modify_span_start(span, span_duration * spans_1.index(span)) for span in spans_1
] # ensure spans don't overlap
spans_2 = [
create_span(
"db",
span_duration,
"SELECT `customer`.`id` FROM `customers` WHERE `customer`.`name` = $1",
),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 20, "SELECT `customer`.`id` FROM `customers`..."),
create_span("db", 900, "SELECT COUNT(*) FROM `products`"),
]
spans_2 = [
modify_span_start(span, span_duration * spans_2.index(span)) for span in spans_2
] # ensure spans don't overlap
event_1 = create_event(spans_1)
event_2 = create_event(spans_2)
fingerprint_1 = self.find_problems(event_1)[0].fingerprint
fingerprint_2 = self.find_problems(event_2)[0].fingerprint
assert fingerprint_1 == fingerprint_2
def test_respects_project_option(self) -> None:
project = self.create_project()
event = self.create_issue_event()
event["project_id"] = project.id
settings = get_detection_settings(project.id)
detector = ConsecutiveDBSpanDetector(settings, event)
assert detector.is_creation_allowed_for_project(project)
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"consecutive_db_queries_detection_enabled": False},
)
settings = get_detection_settings(project.id)
detector = ConsecutiveDBSpanDetector(settings, event)
assert not detector.is_creation_allowed_for_project(project)
def test_detects_consecutive_db_does_not_detect_php(self) -> None:
event = self.create_issue_event()
assert len(self.find_problems(event)) == 1
event["sdk"] = {"name": "sentry.php.laravel"}
assert self.find_problems(event) == []
def test_ignores_events_with_low_time_saving_ratio(self) -> None:
span_duration = 100
spans = [
create_span(
"db",
span_duration,
"SELECT `customer`.`id` FROM `customers` WHERE `customer`.`name` = $1",
),
create_span(
"db",
span_duration,
"SELECT `order`.`id` FROM `books_author` WHERE `author`.`type` = $1",
),
create_span("db", 3000, "SELECT COUNT(*) FROM `products`"),
]
spans = [
modify_span_start(span, span_duration * spans.index(span)) for span in spans
] # ensure spans don't overlap
event = create_event(spans)
assert self.find_problems(event) == []
def test_ignores_graphql(self) -> None:
event = self.create_issue_event()
event["request"] = {"url": "https://url.dev/api/my-endpoint", "method": "POST"}
assert len(self.find_problems(event)) == 1
event["request"]["url"] = "https://url.dev/api/graphql"
assert self.find_problems(event) == []
| ConsecutiveDbDetectorTest |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 22392,
"end": 22513
} | class ____(AbstractExternal3):
name = models.CharField(max_length=15, unique=True)
| OverrideModelNameUsingExternalModel2 |
python | tensorflow__tensorflow | tensorflow/python/trackable/resource_test.py | {
"start": 2161,
"end": 4233
} | class ____(test.TestCase):
def testBasic(self):
resource_tracker = resource.ResourceTracker()
with resource.resource_tracker_scope(resource_tracker):
dummy_resource1 = _DummyResource("test1")
dummy_resource2 = _DummyResource("test2")
self.assertEqual(2, len(resource_tracker.resources))
self.assertEqual("test1", resource_tracker.resources[0].resource_handle)
self.assertEqual("test2", resource_tracker.resources[1].resource_handle)
def testTwoScopes(self):
resource_tracker1 = resource.ResourceTracker()
with resource.resource_tracker_scope(resource_tracker1):
dummy_resource1 = _DummyResource("test1")
resource_tracker2 = resource.ResourceTracker()
with resource.resource_tracker_scope(resource_tracker2):
dummy_resource2 = _DummyResource("test2")
self.assertEqual(1, len(resource_tracker1.resources))
self.assertEqual("test1", resource_tracker1.resources[0].resource_handle)
self.assertEqual(1, len(resource_tracker2.resources))
self.assertEqual("test2", resource_tracker2.resources[0].resource_handle)
def testNestedScopesScopes(self):
resource_tracker = resource.ResourceTracker()
with resource.resource_tracker_scope(resource_tracker):
resource_tracker1 = resource.ResourceTracker()
with resource.resource_tracker_scope(resource_tracker1):
dummy_resource1 = _DummyResource("test1")
resource_tracker2 = resource.ResourceTracker()
with resource.resource_tracker_scope(resource_tracker2):
dummy_resource2 = _DummyResource("test2")
self.assertEqual(1, len(resource_tracker1.resources))
self.assertEqual("test1", resource_tracker1.resources[0].resource_handle)
self.assertEqual(1, len(resource_tracker2.resources))
self.assertEqual("test2", resource_tracker2.resources[0].resource_handle)
self.assertEqual(2, len(resource_tracker.resources))
self.assertEqual("test1", resource_tracker.resources[0].resource_handle)
self.assertEqual("test2", resource_tracker.resources[1].resource_handle)
| ResourceTrackerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/pyodbc.py | {
"start": 2676,
"end": 5086
} | class ____(PyODBCConnector, MySQLDialect):
supports_statement_cache = True
colspecs = util.update_copy(MySQLDialect.colspecs, {Time: _pyodbcTIME})
supports_unicode_statements = True
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def _detect_charset(self, connection: Connection) -> str:
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
# set this to None as _fetch_setting attempts to use it (None is OK)
self._connection_charset = None
try:
value = self._fetch_setting(connection, "character_set_client")
if value:
return value
except exc.DBAPIError:
pass
util.warn(
"Could not detect the connection character set. "
"Assuming latin1."
)
return "latin1"
def _get_server_version_info(
self, connection: Connection
) -> tuple[int, ...]:
return MySQLDialect._get_server_version_info(self, connection)
def _extract_error_code(self, exception: BaseException) -> Optional[int]:
m = re.compile(r"\((\d+)\)").search(str(exception.args))
if m is None:
return None
c: Optional[str] = m.group(1)
if c:
return int(c)
else:
return None
def on_connect(self) -> Callable[[DBAPIConnection], None]:
super_ = super().on_connect()
def on_connect(conn: DBAPIConnection) -> None:
if super_ is not None:
super_(conn)
# declare Unicode encoding for pyodbc as per
# https://github.com/mkleehammer/pyodbc/wiki/Unicode
pyodbc_SQL_CHAR = 1 # pyodbc.SQL_CHAR
pyodbc_SQL_WCHAR = -8 # pyodbc.SQL_WCHAR
conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8")
conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8")
conn.setencoding(encoding="utf-8")
return on_connect
dialect = MySQLDialect_pyodbc
| MySQLDialect_pyodbc |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 94850,
"end": 95372
} | class ____:
# emit_linenums boolean write #line pragmas?
# emit_code_comments boolean copy the original code into C comments?
# c_line_in_traceback boolean append the c file and line number to the traceback for exceptions?
def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
self.emit_code_comments = emit_code_comments
self.emit_linenums = emit_linenums
self.c_line_in_traceback = c_line_in_traceback
| CCodeConfig |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 46146,
"end": 49600
} | class ____(IR):
"""
Input from an existing polars DataFrame.
This typically arises from ``q.collect().lazy()``
"""
__slots__ = ("_id_for_hash", "df", "projection")
_non_child = ("schema", "df", "projection")
df: Any
"""Polars internal PyDataFrame object."""
projection: tuple[str, ...] | None
"""List of columns to project out."""
def __init__(
self,
schema: Schema,
df: Any,
projection: Sequence[str] | None,
):
self.schema = schema
self.df = df
self.projection = tuple(projection) if projection is not None else None
self._non_child_args = (
schema,
pl.DataFrame._from_pydf(df),
self.projection,
)
self.children = ()
self._id_for_hash = random.randint(0, 2**64 - 1)
@staticmethod
def _reconstruct(
schema: Schema,
pl_df: pl.DataFrame,
projection: Sequence[str] | None,
id_for_hash: int,
) -> DataFrameScan: # pragma: no cover
"""
Reconstruct a DataFrameScan from pickled data.
Parameters
----------
schema: Schema
The schema of the DataFrameScan.
pl_df: pl.DataFrame
The underlying polars DataFrame.
projection: Sequence[str] | None
The projection of the DataFrameScan.
id_for_hash: int
The id for hash of the DataFrameScan.
Returns
-------
The reconstructed DataFrameScan.
"""
node = DataFrameScan(schema, pl_df._df, projection)
node._id_for_hash = id_for_hash
return node
def __reduce__(self) -> tuple[Any, ...]: # pragma: no cover
"""Pickle a DataFrameScan object."""
return (
self._reconstruct,
(*self._non_child_args, self._id_for_hash),
)
def get_hashable(self) -> Hashable:
"""
Hashable representation of the node.
The (heavy) dataframe object is not hashed. No two instances of
``DataFrameScan`` will have the same hash, even if they have the
same schema, projection, and config options, and data.
"""
schema_hash = tuple(self.schema.items())
return (
type(self),
schema_hash,
self._id_for_hash,
self.projection,
)
def is_equal(self, other: Self) -> bool:
"""Equality of DataFrameScan nodes."""
return self is other or (
self._id_for_hash == other._id_for_hash
and self.schema == other.schema
and self.projection == other.projection
and pl.DataFrame._from_pydf(self.df).equals(
pl.DataFrame._from_pydf(other.df)
)
)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="DataFrameScan")
def do_evaluate(
cls,
schema: Schema,
df: Any,
projection: tuple[str, ...] | None,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
if projection is not None:
df = df.select(projection)
df = DataFrame.from_polars(df, stream=context.get_cuda_stream())
assert all(
c.obj.type() == dtype.plc_type
for c, dtype in zip(df.columns, schema.values(), strict=True)
)
return df
| DataFrameScan |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 209904,
"end": 210491
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"branch_protection_rule",
"conflicting_branch_protection_rule",
"ref",
)
branch_protection_rule = sgqlc.types.Field(
"BranchProtectionRule", graphql_name="branchProtectionRule"
)
conflicting_branch_protection_rule = sgqlc.types.Field(
"BranchProtectionRule", graphql_name="conflictingBranchProtectionRule"
)
ref = sgqlc.types.Field("Ref", graphql_name="ref")
| BranchProtectionRuleConflict |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_column_values_to_be_equal_to_or_less_than_profile_max.py | {
"start": 864,
"end": 2774
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.less_than_or_equal_to_profile_max"
condition_value_keys = ("profile",)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls: Any, column: str, profile: Any, **kwargs) -> np.ndarray:
columnPresent = (
column.name
in profile["global_stats"]["profile_schema"] # checks to ensure column exists
)
transpose = np.array(column).T # Turns column into a row format for return
if not (columnPresent): # Err column in user DF not present in input profile
return transpose != transpose # Returns 100% unexpected
index = profile["global_stats"]["profile_schema"][column.name][
0
] # Gets index of column from profile
dataType = profile["data_stats"][index]["data_type"] # Checks datatype
if dataType != "int" and dataType != "float": # Err non-numerical column
return transpose != transpose # Returns 100% unexpected
maximum = float(profile["data_stats"][index]["statistics"]["max"])
return transpose <= maximum
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesLessThanOrEqualToProfileMax |
python | dagster-io__dagster | python_modules/libraries/dagster-deltalake/dagster_deltalake/io_manager.py | {
"start": 1377,
"end": 1766
} | class ____(TypedDict):
root_uri: str
mode: WriteMode
overwrite_schema: bool
writer_engine: WriterEngine
storage_options: _StorageOptionsConfig
client_options: NotRequired[dict[str, str]]
table_config: NotRequired[dict[str, str]]
custom_metadata: NotRequired[dict[str, str]]
writer_properties: NotRequired[dict[str, str]]
| _DeltaTableIOManagerResourceConfig |
python | google__pytype | pytype/matcher_test.py | {
"start": 19290,
"end": 22705
} | class ____(MatcherTestBase):
"""Test matching TypeVar against various types."""
def test_match_from_mro(self):
# A TypeParameter never matches anything in match_from_mro, since its mro is
# empty. This test is mostly to make sure we don't crash.
self.assertIsNone(
self.matcher.match_from_mro(
abstract.TypeParameter("T", self.ctx), self.ctx.convert.int_type
)
)
def test_compute_matches(self):
x_val = abstract.TypeParameter("T", self.ctx)
args = [
types.Arg(
name="x",
value=x_val.to_variable(self.ctx.root_node),
typ=self.ctx.convert.unsolvable,
)
]
(match,) = self.matcher.compute_matches(args, match_all_views=True)
self.assertEqual(match.subst, {})
def test_compute_matches_no_match(self):
x_val = abstract.TypeParameter("T", self.ctx)
args = [
types.Arg(
name="x",
value=x_val.to_variable(self.ctx.root_node),
typ=self.ctx.convert.int_type,
)
]
with self.assertRaises(error_types.MatchError):
self.matcher.compute_matches(args, match_all_views=True)
def test_compute_one_match(self):
self.assertTrue(
self.matcher.compute_one_match(
abstract.TypeParameter("T", self.ctx).to_variable(
self.ctx.root_node
),
self.ctx.convert.unsolvable,
).success
)
def test_compute_one_match_no_match(self):
self.assertFalse(
self.matcher.compute_one_match(
abstract.TypeParameter("T", self.ctx).to_variable(
self.ctx.root_node
),
self.ctx.convert.int_type,
).success
)
def test_any(self):
self.assertMatch(
abstract.TypeParameter("T", self.ctx), self.ctx.convert.unsolvable
)
def test_object(self):
self.assertMatch(
abstract.TypeParameter("T", self.ctx), self.ctx.convert.object_type
)
def test_type(self):
self.assertMatch(
abstract.TypeParameter("T", self.ctx), self.ctx.convert.type_type
)
def test_parameterized_type(self):
self.assertMatch(
abstract.TypeParameter("T", self.ctx),
abstract.ParameterizedClass(
self.ctx.convert.type_type,
{abstract_utils.T: self.ctx.convert.unsolvable},
self.ctx,
),
)
def test_parameterized_type_no_match(self):
self.assertNoMatch(
abstract.TypeParameter("T", self.ctx),
abstract.ParameterizedClass(
self.ctx.convert.type_type,
{abstract_utils.T: self.ctx.convert.int_type},
self.ctx,
),
)
def test_nested(self):
self.assertMatch(
abstract.ParameterizedClass(
self.ctx.convert.list_type,
{abstract_utils.T: abstract.TypeParameter("T", self.ctx)},
self.ctx,
),
self.ctx.convert.type_type,
)
def test_nested_no_match(self):
self.assertNoMatch(
abstract.ParameterizedClass(
self.ctx.convert.list_type,
{abstract_utils.T: abstract.TypeParameter("T", self.ctx)},
self.ctx,
),
self.ctx.convert.list_type,
)
def test_no_match(self):
self.assertNoMatch(
abstract.TypeParameter("T", self.ctx), self.ctx.convert.int_type
)
if __name__ == "__main__":
unittest.main()
| TypeVarTest |
python | cherrypy__cherrypy | cherrypy/process/plugins.py | {
"start": 21638,
"end": 25887
} | class ____(Monitor):
"""Monitor which re-executes the process when files change.
This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
if any of the files it monitors change (or is deleted). By default, the
autoreloader monitors all imported modules; you can add to the
set by adding to ``autoreload.files``::
cherrypy.engine.autoreload.files.add(myFile)
If there are imported files you do *not* wish to monitor, you can
adjust the ``match`` attribute, a regular expression. For example,
to stop monitoring cherrypy itself::
cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
the autoreload plugin takes a ``frequency`` argument. The default is
1 second; that is, the autoreloader will examine files once each second.
"""
files = None
"""The set of files to poll for modifications."""
frequency = 1
"""The interval in seconds at which to poll for modified files."""
match = '.*'
"""A regular expression by which to match filenames."""
def __init__(self, bus, frequency=1, match='.*'):
"""Initialize the auto-reloader monitor plugin."""
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own background task thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of sys.modules filenames to monitor."""
search_mod_names = filter(
re.compile(self.match).match,
list(sys.modules.keys()),
)
mods = map(sys.modules.get, search_mod_names)
return set(filter(None, map(self._file_for_module, mods)))
@classmethod
def _file_for_module(cls, module):
"""Return the relevant file for the module."""
return cls._archive_for_zip_module(
module,
) or cls._file_for_file_module(module)
@staticmethod
def _archive_for_zip_module(module):
"""Return the archive filename for the module if relevant."""
try:
return module.__loader__.archive
except AttributeError:
pass
@classmethod
def _file_for_file_module(cls, module):
"""Return the file for the module."""
try:
return module.__file__ and cls._make_absolute(module.__file__)
except AttributeError:
pass
@staticmethod
def _make_absolute(filename):
"""Ensure filename is absolute to avoid effect of os.chdir."""
return (
filename
if os.path.isabs(filename)
else (
os.path.normpath(os.path.join(_module__file__base, filename))
)
)
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log(
'Restarting because %s changed.' % filename,
)
self.thread.cancel()
self.bus.log('Stopped thread %r.' % self.thread.name)
self.bus.restart()
return
| Autoreloader |
python | psf__black | tests/data/cases/fmtonoff5.py | {
"start": 3362,
"end": 3619
} | class ____(t.Protocol):
def this_will_be_formatted(self, **kwargs) -> Named: ...
# fmt: on
# Regression test for https://github.com/psf/black/issues/3436.
if x:
return x
# fmt: off
elif unformatted:
# fmt: on
will_be_formatted()
| Factory |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_airflow_common.py | {
"start": 914,
"end": 24212
} | class ____:
"""
Tests that apply to more than 1 Airflow component so we don't have to repeat tests everywhere.
The one general exception will be the KubernetesExecutor PodTemplateFile, as it requires extra test setup.
"""
@pytest.mark.parametrize(
("logs_values", "expected_mount"),
[
(
{"persistence": {"enabled": True, "subPath": "test/logs"}},
{"subPath": "test/logs", "mountPath": "/opt/airflow/logs", "name": "logs"},
),
],
)
def test_logs_mount(self, logs_values, expected_mount):
docs = render_chart(
values={
"logs": logs_values,
"airflowVersion": "3.0.0",
}, # airflowVersion is present so webserver gets the mount
show_only=[
"templates/api-server/api-server-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/workers/worker-deployment.yaml",
],
)
assert len(docs) == 5
for doc in docs:
assert expected_mount in jmespath.search("spec.template.spec.containers[0].volumeMounts", doc)
# check for components deployed when airflow version is < 3.0.0
docs = render_chart(
values={
"logs": logs_values,
"airflowVersion": "1.10.15",
}, # airflowVersion is present so webserver gets the mount
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
],
)
assert len(docs) == 3
for doc in docs:
assert expected_mount in jmespath.search("spec.template.spec.containers[0].volumeMounts", doc)
@pytest.mark.parametrize(
("dag_values", "expected_mount"),
[
(
{"gitSync": {"enabled": True}},
{
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": True,
},
),
(
{"persistence": {"enabled": True}},
{
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": False,
},
),
(
{
"gitSync": {"enabled": True},
"persistence": {"enabled": True},
},
{
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": True,
},
),
(
{"persistence": {"enabled": True, "subPath": "test/dags"}},
{
"subPath": "test/dags",
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": False,
},
),
(
{"mountPath": "/opt/airflow/dags/custom", "gitSync": {"enabled": True}},
{
"mountPath": "/opt/airflow/dags/custom",
"name": "dags",
"readOnly": True,
},
),
(
{"mountPath": "/opt/airflow/dags/custom", "persistence": {"enabled": True}},
{
"mountPath": "/opt/airflow/dags/custom",
"name": "dags",
"readOnly": False,
},
),
],
)
def test_dags_mount(self, dag_values, expected_mount):
docs = render_chart(
values={
"dags": dag_values,
"airflowVersion": "1.10.15",
}, # airflowVersion is present so webserver gets the mount
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
],
)
assert len(docs) == 3
for doc in docs:
assert expected_mount in jmespath.search("spec.template.spec.containers[0].volumeMounts", doc)
def test_webserver_config_configmap_name_volume_mounts(self):
configmap_name = "my-configmap"
docs = render_chart(
values={
"webserver": {
"webserverConfig": "CSRF_ENABLED = True # {{ .Release.Name }}",
"webserverConfigConfigMapName": configmap_name,
},
"workers": {"kerberosSidecar": {"enabled": True}},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
],
)
for doc in docs:
assert "webserver-config" in [
c["name"]
for r in jmespath.search(
"spec.template.spec.initContainers[?name=='wait-for-airflow-migrations'].volumeMounts",
doc,
)
for c in r
]
for container in jmespath.search("spec.template.spec.containers", doc):
assert "webserver-config" in [c["name"] for c in jmespath.search("volumeMounts", container)]
assert "webserver-config" in [
c["name"] for c in jmespath.search("spec.template.spec.volumes", doc)
]
assert configmap_name == jmespath.search(
"spec.template.spec.volumes[?name=='webserver-config'].configMap.name | [0]", doc
)
def test_annotations(self):
"""
Test Annotations are correctly applied.
Verifies all pods created Scheduler, Webserver & Worker deployments.
"""
release_name = "test-basic"
k8s_objects = render_chart(
name=release_name,
values={
"airflowPodAnnotations": {"test-annotation/safe-to-evict": "true"},
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"dagProcessor": {"enabled": True},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/api-server/api-server-deployment.yaml",
"templates/flower/flower-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
"templates/cleanup/cleanup-cronjob.yaml",
"templates/database-cleanup/database-cleanup-cronjob.yaml",
],
)
# Objects in show_only are 9 but only one of Webserver or API server is created so we have 8 objects
assert len(k8s_objects) == 8
for k8s_object in k8s_objects:
if k8s_object["kind"] == "CronJob":
annotations = k8s_object["spec"]["jobTemplate"]["spec"]["template"]["metadata"]["annotations"]
else:
annotations = k8s_object["spec"]["template"]["metadata"]["annotations"]
assert "test-annotation/safe-to-evict" in annotations
assert "true" in annotations["test-annotation/safe-to-evict"]
def test_global_affinity_tolerations_topology_spread_constraints_and_node_selector(self):
"""Test affinity, tolerations, etc are correctly applied on all pods created."""
k8s_objects = render_chart(
values={
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"pgbouncer": {"enabled": True},
"dagProcessor": {"enabled": True},
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "static-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"topologySpreadConstraints": [
{
"maxSkew": 1,
"topologyKey": "foo",
"whenUnsatisfiable": "ScheduleAnyway",
"labelSelector": {"matchLabels": {"tier": "airflow"}},
}
],
"nodeSelector": {"type": "user-node"},
},
show_only=[
"templates/cleanup/cleanup-cronjob.yaml",
"templates/database-cleanup/database-cleanup-cronjob.yaml",
"templates/flower/flower-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/pgbouncer/pgbouncer-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/api-server/api-server-deployment.yaml",
"templates/workers/worker-deployment.yaml",
],
)
# Objects in show_only are 14 but only one of Webserver or API server is created so we have 13 objects
assert len(k8s_objects) == 13
for k8s_object in k8s_objects:
if k8s_object["kind"] == "CronJob":
podSpec = jmespath.search("spec.jobTemplate.spec.template.spec", k8s_object)
else:
podSpec = jmespath.search("spec.template.spec", k8s_object)
assert (
jmespath.search(
"affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
podSpec,
)
== "foo"
)
assert jmespath.search("nodeSelector.type", podSpec) == "user-node"
assert jmespath.search("tolerations[0].key", podSpec) == "static-pods"
assert jmespath.search("topologySpreadConstraints[0].topologyKey", podSpec) == "foo"
@pytest.mark.parametrize(
("expected_image", "tag", "digest"),
[
("apache/airflow:user-tag", "user-tag", None),
("apache/airflow@user-digest", None, "user-digest"),
("apache/airflow@user-digest", "user-tag", "user-digest"),
],
)
def test_should_use_correct_image(self, expected_image, tag, digest):
docs = render_chart(
values={
"images": {
"airflow": {
"repository": "apache/airflow",
"tag": tag,
"digest": digest,
},
},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
],
)
for doc in docs:
assert expected_image == jmespath.search("spec.template.spec.initContainers[0].image", doc)
@pytest.mark.parametrize(
("expected_image", "tag", "digest"),
[
("apache/airflow:user-tag", "user-tag", None),
("apache/airflow@user-digest", None, "user-digest"),
("apache/airflow@user-digest", "user-tag", "user-digest"),
],
)
def test_should_use_correct_default_image(self, expected_image, tag, digest):
docs = render_chart(
values={
"defaultAirflowRepository": "apache/airflow",
"defaultAirflowTag": tag,
"defaultAirflowDigest": digest,
"images": {"useDefaultImageForMigration": True},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
],
)
for doc in docs:
assert expected_image == jmespath.search("spec.template.spec.initContainers[0].image", doc)
def test_should_set_correct_helm_hooks_weight(self):
docs = render_chart(
show_only=["templates/secrets/fernetkey-secret.yaml"],
)
annotations = jmespath.search("metadata.annotations", docs[0])
assert annotations["helm.sh/hook-weight"] == "0"
def test_should_disable_some_variables(self):
docs = render_chart(
values={
"enableBuiltInSecretEnvVars": {
"AIRFLOW__CORE__SQL_ALCHEMY_CONN": False,
"AIRFLOW__DATABASE__SQL_ALCHEMY_CONN": False,
"AIRFLOW__API__SECRET_KEY": False,
"AIRFLOW__API_AUTH__JWT_SECRET": False,
"AIRFLOW__WEBSERVER__SECRET_KEY": False,
# the following vars only appear if remote logging is set, so disabling them in this test is kind of a no-op
"AIRFLOW__ELASTICSEARCH__HOST": False,
"AIRFLOW__ELASTICSEARCH__ELASTICSEARCH_HOST": False,
"AIRFLOW__OPENSEARCH__HOST": False,
},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
],
)
expected_vars = [
"AIRFLOW__CORE__FERNET_KEY",
"AIRFLOW_HOME",
"AIRFLOW_CONN_AIRFLOW_DB",
"AIRFLOW__CELERY__BROKER_URL",
]
expected_vars_in_worker = ["DUMB_INIT_SETSID"] + expected_vars
for doc in docs:
component = doc["metadata"]["labels"]["component"]
variables = expected_vars_in_worker if component == "worker" else expected_vars
assert variables == jmespath.search("spec.template.spec.containers[0].env[*].name", doc), (
f"Wrong vars in {component}"
)
def test_have_all_variables(self):
docs = render_chart(
values={},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
],
)
expected_vars = [
"AIRFLOW__CORE__FERNET_KEY",
"AIRFLOW_HOME",
"AIRFLOW__CORE__SQL_ALCHEMY_CONN",
"AIRFLOW__DATABASE__SQL_ALCHEMY_CONN",
"AIRFLOW_CONN_AIRFLOW_DB",
"AIRFLOW__API__SECRET_KEY",
"AIRFLOW__API_AUTH__JWT_SECRET",
"AIRFLOW__CELERY__BROKER_URL",
]
expected_vars_in_worker = ["DUMB_INIT_SETSID"] + expected_vars
for doc in docs:
component = doc["metadata"]["labels"]["component"]
variables = expected_vars_in_worker if component == "worker" else expected_vars
assert variables == jmespath.search("spec.template.spec.containers[0].env[*].name", doc), (
f"Wrong vars in {component}"
)
def test_have_all_config_mounts_on_init_containers(self):
docs = render_chart(
values={
"dagProcessor": {"enabled": True},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/api-server/api-server-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
],
)
assert len(docs) == 5
expected_mount = {
"subPath": "airflow.cfg",
"name": "config",
"readOnly": True,
"mountPath": "/opt/airflow/airflow.cfg",
}
for doc in docs:
assert expected_mount in jmespath.search("spec.template.spec.initContainers[0].volumeMounts", doc)
def test_priority_class_name(self):
docs = render_chart(
values={
"flower": {"enabled": True, "priorityClassName": "low-priority-flower"},
"pgbouncer": {"enabled": True, "priorityClassName": "low-priority-pgbouncer"},
"scheduler": {"priorityClassName": "low-priority-scheduler"},
"statsd": {"priorityClassName": "low-priority-statsd"},
"triggerer": {"priorityClassName": "low-priority-triggerer"},
"dagProcessor": {"priorityClassName": "low-priority-dag-processor"},
"webserver": {"priorityClassName": "low-priority-webserver"},
"workers": {"priorityClassName": "low-priority-worker"},
"cleanup": {"enabled": True, "priorityClassName": "low-priority-airflow-cleanup-pods"},
"databaseCleanup": {"enabled": True, "priorityClassName": "low-priority-database-cleanup"},
"migrateDatabaseJob": {"priorityClassName": "low-priority-run-airflow-migrations"},
"createUserJob": {"priorityClassName": "low-priority-create-user-job"},
},
show_only=[
"templates/flower/flower-deployment.yaml",
"templates/pgbouncer/pgbouncer-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/cleanup/cleanup-cronjob.yaml",
"templates/database-cleanup/database-cleanup-cronjob.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/jobs/create-user-job.yaml",
],
)
assert len(docs) == 11
for doc in docs:
component = doc["metadata"]["labels"]["component"]
if component in ["airflow-cleanup-pods", "database-cleanup"]:
priority = doc["spec"]["jobTemplate"]["spec"]["template"]["spec"]["priorityClassName"]
else:
priority = doc["spec"]["template"]["spec"]["priorityClassName"]
assert priority == f"low-priority-{component}"
@pytest.mark.parametrize(
("image_pull_secrets", "registry_secret_name", "registry_connection", "expected_image_pull_secrets"),
[
([], None, {}, []),
(
[],
None,
{"host": "example.com", "user": "user", "pass": "pass", "email": "user@example.com"},
["test-basic-registry"],
),
([], "regcred", {}, ["regcred"]),
(["regcred2"], "regcred", {}, ["regcred2"]),
(
["regcred2"],
None,
{"host": "example.com", "user": "user", "pass": "pass", "email": "user@example.com"},
["regcred2"],
),
(["regcred", {"name": "regcred2"}, ""], None, {}, ["regcred", "regcred2"]),
],
)
def test_image_pull_secrets(
self, image_pull_secrets, registry_secret_name, registry_connection, expected_image_pull_secrets
):
release_name = "test-basic"
docs = render_chart(
name=release_name,
values={
"imagePullSecrets": image_pull_secrets,
"registry": {"secretName": registry_secret_name, "connection": registry_connection},
"flower": {"enabled": True},
"pgbouncer": {"enabled": True},
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
},
show_only=[
"templates/flower/flower-deployment.yaml",
"templates/pgbouncer/pgbouncer-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/cleanup/cleanup-cronjob.yaml",
"templates/database-cleanup/database-cleanup-cronjob.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/jobs/create-user-job.yaml",
],
)
expected_image_pull_secrets = [{"name": name} for name in expected_image_pull_secrets]
for doc in docs:
got_image_pull_secrets = (
doc["spec"]["jobTemplate"]["spec"]["template"]["spec"]["imagePullSecrets"]
if doc["kind"] == "CronJob"
else doc["spec"]["template"]["spec"]["imagePullSecrets"]
)
assert got_image_pull_secrets == expected_image_pull_secrets
| TestAirflowCommon |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 42348,
"end": 44526
} | class ____(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| CausalLMOutputWithPast |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 14953,
"end": 16371
} | class ____:
def get_data(self,x_stride=1,y_stride=1):
rng = np.random.default_rng(1234)
alpha = array(1., dtype = self.dtype)
a = rng.normal(0.,1.,(3,3)).astype(self.dtype)
x = arange(shape(a)[0]*x_stride,dtype=self.dtype)
y = arange(shape(a)[1]*y_stride,dtype=self.dtype)
return alpha,a,x,y
def test_simple(self):
alpha,a,x,y = self.get_data()
# transpose takes care of Fortran vs. C(and Python) memory layout
desired_a = alpha*transpose(x[:,newaxis]*y) + a
self.blas_func(x,y,a)
assert_array_almost_equal(desired_a,a)
def test_x_stride(self):
alpha,a,x,y = self.get_data(x_stride=2)
desired_a = alpha*transpose(x[::2,newaxis]*y) + a
self.blas_func(x,y,a,incx=2)
assert_array_almost_equal(desired_a,a)
def test_x_stride_assert(self):
alpha,a,x,y = self.get_data(x_stride=2)
with pytest.raises(ValueError, match='foo'):
self.blas_func(x,y,a,incx=3)
def test_y_stride(self):
alpha,a,x,y = self.get_data(y_stride=2)
desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a
self.blas_func(x,y,a,incy=2)
assert_array_almost_equal(desired_a,a)
def test_y_stride_assert(self):
alpha,a,x,y = self.get_data(y_stride=2)
with pytest.raises(ValueError, match='foo'):
self.blas_func(a,x,y,incy=3)
| BaseGer |
python | numpy__numpy | numpy/distutils/misc_util.py | {
"start": 1572,
"end": 24010
} | class ____:
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
processors on the system, with a maximum of 8 (to prevent
overloading the system if there a lot of CPUs).
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
cpu_count = min(cpu_count, 8)
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
def quote_args(args):
"""Quote list of arguments.
.. deprecated:: 1.22.
"""
import warnings
warnings.warn('"quote_args" is deprecated.',
DeprecationWarning, stacklevel=2)
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
split = name.split('/')
return os.path.join(*split)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
with open(config_file) as fid:
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
def sorted_glob(fileglob):
"""sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = sorted_glob(n)
p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path: str) -> str:
"""Convert a path from Cygwin-native to Windows-native.
Uses the cygpath utility (part of the Base install) to do the
actual conversion. Falls back to returning the original path if
this fails.
Handles the default ``/cygdrive`` mount prefix as well as the
``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such
as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or
``/home/username``
Parameters
----------
path : str
The path to convert
Returns
-------
converted_path : str
The converted path
Notes
-----
Documentation for cygpath utility:
https://cygwin.com/cygwin-ug-net/cygpath.html
Documentation for the C function it wraps:
https://cygwin.com/cygwin-api/func-cygwin-conv-path.html
"""
if sys.platform != "cygwin":
return path
return subprocess.check_output(
["/usr/bin/cygpath", "--windows", path], text=True
)
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
ver = msvc_runtime_major ()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
with open(source) as f:
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
return isinstance(s, str)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
return all(is_string(item) for item in lst)
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
return any(fortran_ext_match(source) for source in sources)
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
return any(cxx_ext_match(source) for source in sources)
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149.
"""
confvars = distutils.sysconfig.get_config_vars()
so_ext = confvars.get('EXT_SUFFIX', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
| InstallableLib |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0096_delete_non_single_written_fire_history.py | {
"start": 669,
"end": 2034
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("workflow_engine", "0095_unique_detectorgroup_group"),
]
operations = [
migrations.RunPython(
code=delete_non_single_written_fire_history,
reverse_code=migrations.RunPython.noop,
hints={"tables": ["workflow_engine_workflowfirehistory"]},
),
]
| Migration |
python | facebook__pyre-check | tools/generate_taint_models/get_dynamic_graphql_sources.py | {
"start": 1211,
"end": 4107
} | class ____(ModelGenerator[CallableModel]):
def __init__(
self,
# pyre-fixme[11]: Annotation `GraphQLSchema` is not defined as a type.
graphql_schema: GraphQLSchema,
graphql_object_type: GraphQLObjectType,
annotations: AnnotationSpecification,
formattable_return: Optional[DynamicGraphQLFormattableSpecification] = None,
resolvers_to_exclude: Optional[List[str]] = None,
) -> None:
super().__init__()
self.graphql_schema: GraphQLSchema = graphql_schema
self.graphql_object_type: GraphQLObjectType = graphql_object_type
if formattable_return and annotations.returns:
raise ModelGenerationException("Setting a returns in annotations will be overwritten when specifying a formattable_return")
self.annotations: AnnotationSpecification = annotations
self.formattable_return: Optional[DynamicGraphQLFormattableSpecification] = formattable_return
self.resolvers_to_exclude: List[str] = resolvers_to_exclude or []
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return []
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[CallableModel]:
type_map = self.graphql_schema.type_map
# Get all graphql resolver functions.
entry_points: Set[CallableModel] = set()
for element in type_map.values():
if not isinstance(element, self.graphql_object_type):
continue
try:
fields = element.fields
gql_object_name = element.name
except AssertionError:
# GraphQL throws an exception when a GraphQL object is created
# with 0 fields. Since we don't control the library, we need to
# program defensively here :(
continue
for field in fields:
resolver = fields[field].resolve
if (
resolver is not None
and resolver.__name__ != "<lambda>"
and f"{resolver.__module__}.{resolver.__name__}"
not in self.resolvers_to_exclude
):
annotation = self.annotations
if self.formattable_return:
formatted_return = self.formattable_return.format(gql_object_name, field)
annotation = self.annotations._replace(returns=formatted_return)
try:
model = CallableModel(
callable_object=resolver, annotations=annotation
)
entry_points.add(model)
except UnsupportedCallable:
pass
return sorted(entry_points)
| DynamicGraphQLSourceGenerator |
python | PyCQA__pylint | tests/extensions/test_private_import.py | {
"start": 530,
"end": 2700
} | class ____(CheckerTestCase):
"""The mocked dirname is the directory of the file being linted, the node is code inside that file."""
CHECKER_CLASS = private_import.PrivateImportChecker
@patch("pathlib.Path.parent")
def test_internal_module(self, parent: MagicMock) -> None:
parent.parts = ("", "dir", "module")
import_from = astroid.extract_node("""from module import _file""")
with self.assertNoMessages():
self.checker.visit_importfrom(import_from)
@patch("pathlib.Path.parent")
def test_external_module_nested(self, parent: MagicMock) -> None:
parent.parts = ("", "dir", "module", "module_files", "util")
import_from = astroid.extract_node("""from module import _file""")
with self.assertNoMessages():
self.checker.visit_importfrom(import_from)
@patch("pathlib.Path.parent")
def test_external_module_dot_import(self, parent: MagicMock) -> None:
parent.parts = ("", "dir", "outer", "inner", "module_files", "util")
import_from = astroid.extract_node("""from outer.inner import _file""")
with self.assertNoMessages():
self.checker.visit_importfrom(import_from)
@patch("pathlib.Path.parent")
def test_external_module_dot_import_outer_only(self, parent: MagicMock) -> None:
parent.parts = ("", "dir", "outer", "extensions")
import_from = astroid.extract_node("""from outer.inner import _file""")
with self.assertNoMessages():
self.checker.visit_importfrom(import_from)
@patch("pathlib.Path.parent")
def test_external_module(self, parent: MagicMock) -> None:
parent.parts = ("", "dir", "other")
import_from = astroid.extract_node("""from module import _file""")
msg = MessageTest(
msg_id="import-private-name",
node=import_from,
line=1,
col_offset=0,
end_line=1,
end_col_offset=24,
args=("object", "_file"),
confidence=HIGH,
)
with self.assertAddsMessages(msg):
self.checker.visit_importfrom(import_from)
| TestPrivateImport |
python | kamyu104__LeetCode-Solutions | Python/path-with-maximum-minimum-value.py | {
"start": 1415,
"end": 2168
} | class ____(object):
def maximumMinimumPath(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
max_heap = [(-A[0][0], 0, 0)]
lookup = set([(0, 0)])
while max_heap:
i, r, c = heapq.heappop(max_heap)
if r == len(A)-1 and c == len(A[0])-1:
return -i
for d in directions:
nr, nc = r+d[0], c+d[1]
if 0 <= nr < len(A) and \
0 <= nc < len(A[0]) and \
(nr, nc) not in lookup:
heapq.heappush(max_heap, (-min(-i, A[nr][nc]), nr, nc))
lookup.add((nr, nc))
return -1
| Solution2 |
python | simonw__datasette | datasette/views/base.py | {
"start": 604,
"end": 991
} | class ____(Exception):
def __init__(
self,
message,
title=None,
error_dict=None,
status=500,
template=None,
message_is_html=False,
):
self.message = message
self.title = title
self.error_dict = error_dict or {}
self.status = status
self.message_is_html = message_is_html
| DatasetteError |
python | pytorch__pytorch | torch/_higher_order_ops/flex_attention.py | {
"start": 3504,
"end": 21572
} | class ____(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("flex_attention_backward", cacheable=True)
def __call__(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
out: torch.Tensor,
logsumexp: torch.Tensor,
grad_out: torch.Tensor,
grad_logsumexp: torch.Tensor,
fw_graph: Union[Callable, GraphModule],
joint_graph: GraphModule,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[
torch.Tensor, torch.Tensor, torch.Tensor, tuple[Optional[torch.Tensor], ...]
]:
validate_subgraph_args_types(score_mod_other_buffers + mask_mod_other_buffers)
return super().__call__(
query,
key,
value,
out,
logsumexp,
grad_out,
grad_logsumexp,
fw_graph,
joint_graph,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
flex_attention_backward = FlexAttentionBackwardHOP()
def _math_attention_inner(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor]:
from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex
working_precision = torch.float64 if query.dtype == torch.float64 else torch.float32
scores = query.to(working_precision) @ key.to(working_precision).transpose(-2, -1)
b = torch.arange(0, scores.size(0), device=scores.device)
h = torch.arange(0, scores.size(1), device=scores.device)
m = torch.arange(0, scores.size(2), device=scores.device)
n = torch.arange(0, scores.size(3), device=scores.device)
captured_buffers_in_dim = (None,) * len(score_mod_other_buffers)
from torch.nn.attention.flex_attention import _vmap_for_bhqkv
# first input is score
score_mod = _vmap_for_bhqkv(score_mod, prefix=(0,), suffix=captured_buffers_in_dim)
mask_mod = block_mask[-1]
mask_mod_in_dim_buffers = (None,) * len(mask_mod_other_buffers)
mask_mod = _vmap_for_bhqkv(mask_mod, prefix=(), suffix=mask_mod_in_dim_buffers)
with TransformGetItemToIndex():
scores = (scores * scale).to(working_precision)
post_mod_scores = torch.where(
mask_mod(b, h, m, n, *mask_mod_other_buffers),
score_mod(scores, b, h, m, n, *score_mod_other_buffers),
torch.tensor(-float("inf"), dtype=working_precision, device=scores.device),
)
return scores, post_mod_scores
def math_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Eager implementation
This implementation uses vmap to vectorize the score_mod function over the batch, head, m, and n dimensions.
We then apply the vectorized score_mod function to the scores matrix. Each wrap of vmap applies one of the
batch, head, m, or n dimensions. We need to apply vmap 4 times to vectorized over all 4 dimensions.
Args:
query: The query tensor
key: The key tensor
value: The value tensor
score_mod: The score_mod function
other_buffers: Other buffers that are passed to the score_mod function
"""
# broadcast query & key along head dim for GQA
G = query.size(1) // key.size(1)
value = torch.repeat_interleave(value, G, dim=1)
key = torch.repeat_interleave(key, G, dim=1)
Bq, Bkv = query.size(0), key.size(0)
if not ((Bq == Bkv) or (Bq > 1 and Bkv == 1)):
raise RuntimeError(f"Bq and Bkv must broadcast. Got Bq={Bq} and Bkv={Bkv}")
key = key.expand((Bq, *key.size()[1:]))
value = value.expand((Bq, *value.size()[1:]))
_, post_mod_scores = _math_attention_inner(
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
# Set fully masked rows' sumexp to 0.0
logsumexp = post_mod_scores.logsumexp(dim=-1)
masked_rows = torch.all(post_mod_scores == -float("inf"), dim=-1)
logsumexp = torch.where(masked_rows, -float("inf"), logsumexp)
# working precision will be used so no need to cast to fp32
max_scores = torch.max(post_mod_scores, dim=-1)[0]
post_mod_scores = torch._safe_softmax(post_mod_scores, dim=-1)
# NB: kernel computes in ln2 space, we always convert back at the top level op, so
# for math impl we divide by log(2) because we will multiply by log(2)
return (
post_mod_scores.to(query.dtype) @ value,
logsumexp / math.log(2),
max_scores / math.log(2),
)
@flex_attention.py_impl(DispatchKey.CompositeExplicitAutograd)
def sdpa_dense(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
out, lse, max_scores = math_attention(
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
out = _permute_strides(out, query.stride())
return out, lse, max_scores
def trace_flex_attention(
proxy_mode: ProxyTorchDispatchMode,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Traces the flex_attention operator with the given score_mod function and other_buffers.
Trace SDPA will call make_fx with "fake" example vals and then trace the score_mod function
This will produce a GraphModule that will be stored on the root tracer as "sdpa_score". We
access this graph module in inductor to inline the score_mod function to the triton template.
"""
from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex
example_out = flex_attention(
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
example_vals = [query.new_zeros((), requires_grad=query.requires_grad)] + [
query.new_zeros((), dtype=torch.int) for _ in range(4)
]
mask_example_vals = [query.new_zeros((), dtype=torch.int) for _ in range(4)]
mask_mod = block_mask[-1]
with TransformGetItemToIndex():
score_graph = reenter_make_fx(score_mod)(
*example_vals, *score_mod_other_buffers
)
mask_graph = reenter_make_fx(mask_mod)(
*mask_example_vals, *mask_mod_other_buffers
)
assert isinstance(proxy_mode.tracer, torch.fx.Tracer)
block_mask = block_mask[:-1] + (mask_graph,)
qualname = proxy_mode.tracer.get_fresh_qualname("sdpa_score")
proxy_mode.tracer.root.register_module(qualname, score_graph)
mask_qualname = proxy_mode.tracer.get_fresh_qualname("sdpa_mask")
proxy_mode.tracer.root.register_module(mask_qualname, mask_graph)
node_args = (
query,
key,
value,
score_graph,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
# pyrefly: ignore [missing-attribute]
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
with torch.fx.experimental.proxy_tensor.set_original_aten_op(flex_attention):
out_proxy = proxy_mode.tracer.create_proxy(
"call_function", flex_attention, proxy_args, {}
)
return track_tensor_tree(
example_out,
out_proxy,
constant=None,
# pyrefly: ignore [bad-argument-type]
tracer=proxy_mode.tracer,
)
@flex_attention.py_impl(ProxyTorchDispatchMode)
def flex_attention_proxy_torch_dispatch_mode(
mode: ProxyTorchDispatchMode,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
assert mode is not None, "Mode should always be enabled for python fallback key"
return trace_flex_attention(
mode,
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
@flex_attention.py_functionalize_impl
def flex_attention_functionalize(
ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Defines the functionalization rules for the flex_attention operator.
Write now we are unwrapping each tensor and then redispatching to the next, however we want to
guard against any mutations in the score_mod function, to the other_buffers since those
are free variables.
"""
from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex
if has_user_subclass(
(
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
),
allowed_subclasses=(FakeTensor, FunctionalTensor),
):
return NotImplemented
query_unwrapped = ctx.unwrap_tensors(query)
key_unwrapped = ctx.unwrap_tensors(key)
value_unwrapped = ctx.unwrap_tensors(value)
block_mask_unwrapped = ctx.unwrap_tensors(block_mask)
score_mod_other_buffers_unwrapped = ctx.unwrap_tensors(score_mod_other_buffers)
mask_mod_other_buffers_unwrapped = ctx.unwrap_tensors(mask_mod_other_buffers)
# Appease the mypy overlords
assert isinstance(query_unwrapped, torch.Tensor)
assert isinstance(key_unwrapped, torch.Tensor)
assert isinstance(value_unwrapped, torch.Tensor)
assert isinstance(block_mask_unwrapped, tuple)
assert isinstance(score_mod_other_buffers_unwrapped, tuple)
assert isinstance(mask_mod_other_buffers_unwrapped, tuple)
example_vals = (
[query_unwrapped.new_zeros(())]
+ [query_unwrapped.new_zeros((), dtype=torch.int) for _ in range(4)]
+ list(score_mod_other_buffers_unwrapped)
)
with ctx.redispatch_to_next():
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
with TransformGetItemToIndex():
# TODO: So far only the input mutations are checked
# In the other HOPs, also aliases are checked which is
# omitted here
mutates = _has_potential_branch_input_mutation(
score_mod, example_vals, pre_dispatch
)
# The only care about mutations of existing buffers since we can't replay these.
# However, we can just error if anything is detected
if mutates:
raise UnsupportedAliasMutationException("Mutations detected in score_mod")
out = flex_attention(
query_unwrapped,
key_unwrapped,
value_unwrapped,
functional_score_mod,
block_mask_unwrapped,
scale,
kernel_options,
score_mod_other_buffers_unwrapped,
mask_mod_other_buffers_unwrapped,
)
return ctx.wrap_tensors(out) # type: ignore[return-value, arg-type]
@register_fake(flex_attention)
def flex_attention_fake_impl(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: tuple,
scale: float,
kernel_options: dict[str, Any],
score_mod_other_buffers: tuple = (),
mask_mod_other_buffers: tuple = (),
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if has_user_subclass(
(
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
),
allowed_subclasses=(FakeTensor,),
):
return NotImplemented
v_head_dim = value.size(-1)
batch_size, num_heads, seq_len_q, _q_head_dim = query.shape
logsumexp = query.new_empty(batch_size, num_heads, seq_len_q, dtype=torch.float32)
max_scores = query.new_empty(batch_size, num_heads, seq_len_q, dtype=torch.float32)
out_shape = (batch_size, num_heads, seq_len_q, v_head_dim)
out = query.new_empty(out_shape)
out = _permute_strides(out, query.stride())
return out, logsumexp, max_scores
# Registers dispatches for SAC
redirect_to_mode(flex_attention, _CachingTorchDispatchMode)
redirect_to_mode(flex_attention, _CachedTorchDispatchMode)
# ---------------------------- Autograd Implementation ----------------------------
def create_fw_bw_graph(
score_mod: Callable,
index_values: tuple[Tensor, Tensor, Tensor, Tensor, Tensor],
other_buffers: tuple[Tensor, ...],
) -> tuple[Callable, Callable]:
# See Note:[HOP create fw_bw graph]
# All of these imports need to be here in order to avoid circular dependencies
from torch._dispatch.python import suspend_functionalization
from torch._functorch.aot_autograd import AOTConfig, create_joint
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.functional_tensor import disable_functional_mode
from torch.fx.experimental.proxy_tensor import disable_proxy_modes_tracing
dummy_aot_config = AOTConfig(
fw_compiler=None, # type: ignore[arg-type]
bw_compiler=None, # type: ignore[arg-type]
partition_fn=None, # type: ignore[arg-type]
decompositions={},
num_params_buffers=0,
aot_id=0,
keep_inference_input_mutations=False,
)
with suspend_functionalization(), disable_functional_mode():
with disable_proxy_modes_tracing():
def _from_fun(
t: Union[Tensor, torch.SymInt, int],
) -> Union[Tensor, torch.SymInt, int]:
if isinstance(t, torch.Tensor):
return torch.empty_strided(
t.size(),
t.stride(),
device=t.device,
dtype=t.dtype,
requires_grad=t.requires_grad,
)
return t
# If someone runs this hop under the default compiler backend ("eager")
# Then this path will be run with the actual user inputs. We convert them
# to fake tensors in order to not perform any actual compute.
from torch._guards import detect_fake_mode
fake_mode = detect_fake_mode(index_values)
if fake_mode is None:
fake_mode = FakeTensorMode(allow_non_fake_inputs=True)
with fake_mode:
unwrapped_score_mod_indexes = pytree.tree_map(_from_fun, index_values)
unwrapped_other_buffers = pytree.tree_map(_from_fun, other_buffers)
assert all(
isinstance(t, (FakeTensor, int, torch.SymInt))
for t in unwrapped_score_mod_indexes + unwrapped_other_buffers
)
example_flat_out = pytree.tree_map(
_from_fun,
score_mod(*unwrapped_score_mod_indexes, *unwrapped_other_buffers),
)
if not isinstance(example_flat_out, torch.Tensor):
raise RuntimeError(
"Expected output of score_mod to be a tensor."
f"Got type {type(example_flat_out)}."
)
example_grad = _from_fun(example_flat_out)
def joint_f(
score: Tensor,
b: Tensor,
h: Tensor,
m: Tensor,
n: Tensor,
example_grad: Tensor,
*other_buffers: tuple[Tensor, ...],
) -> tuple[Tensor, ...]:
def fw_with_masks(
*args: tuple[Tensor, ...],
) -> tuple[tuple[Tensor], tuple[bool]]:
fw_out = score_mod(*args)
out_requires_grad = fw_out.requires_grad
return ((fw_out,), (out_requires_grad,))
joint = create_joint(fw_with_masks, aot_config=dummy_aot_config)
args = [score, b, h, m, n] + list(other_buffers)
optional_grad = [example_grad] if example_grad.requires_grad else []
_, grads = joint(args, optional_grad)
return grads
joint_graph = make_fx(joint_f)(
*unwrapped_score_mod_indexes, example_grad, *unwrapped_other_buffers
)
return score_mod, joint_graph
| FlexAttentionBackwardHOP |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 2770,
"end": 3252
} | class ____(models.Model):
name = models.CharField(max_length=100, unique=True)
company = models.ForeignKey(Company, null=False, on_delete=models.CASCADE)
def __init__(self, **kw):
if "company" in kw:
raise RuntimeError
cname = kw["name"] + "_company"
kw["company"] = Company.objects.create(name=cname)
super().__init__(**kw)
def validate_even(value):
if value % 2 != 0:
raise ValidationError("")
| MandatoryComputed |
python | allegroai__clearml | clearml/backend_api/schema/service.py | {
"start": 159,
"end": 6856
} | class ____(object):
"""Service schema handler"""
__jsonschema_ref_ex = re.compile("^#/definitions/(.*)$")
@property
def default(self) -> ConfigTree:
return self._default
@property
def actions(self) -> Dict[str, Dict[float, Action]]:
return self._actions
@property
def definitions(self) -> Dict[str, Any]:
"""Raw service definitions (each might be dependant on some of its siblings)"""
return self._definitions
@property
def definitions_refs(self) -> Dict[str, Set[str]]:
return self._definitions_refs
@property
def name(self) -> str:
return self._name
@property
def doc(self) -> str:
return self._doc
def __init__(self, name: str, service_config: ConfigTree) -> None:
self._name = name
self._default = None
self._actions = []
self._definitions = None
self._definitions_refs = None
self._doc = None
self.parse(service_config)
@classmethod
def get_ref_name(cls, ref_string: str) -> str:
m = cls.__jsonschema_ref_ex.match(ref_string)
if m:
return m.group(1)
def parse(self, service_config: ConfigTree) -> None:
self._default = service_config.get("_default", ConfigTree()).as_plain_ordered_dict()
self._doc = "{} service".format(self.name)
description = service_config.get("_description", "")
if description:
self._doc += "\n\n{}".format(description)
self._definitions = service_config.get("_definitions", ConfigTree()).as_plain_ordered_dict()
self._definitions_refs = {k: self._get_schema_references(v) for k, v in self._definitions.items()}
all_refs = set(itertools.chain(*self.definitions_refs.values()))
if not all_refs.issubset(self.definitions):
raise ValueError(
"Unresolved references (%s) in %s/definitions"
% (", ".join(all_refs.difference(self.definitions)), self.name)
)
actions = {k: v.as_plain_ordered_dict() for k, v in service_config.items() if not k.startswith("_")}
self._actions = {
action_name: action
for action_name, action in (
(action_name, self._parse_action_versions(action_name, action_versions))
for action_name, action_versions in actions.items()
)
if action
}
def _parse_action_versions(self, action_name: str, action_versions: dict) -> dict:
def parse_version(action_version: str) -> float:
try:
return float(action_version)
except (ValueError, TypeError) as ex:
raise ValueError(
"Failed parsing version number {} ({}) in {}/{}".format(
action_version, ex.args[0], self.name, action_name
)
)
def add_internal(cfg: dict) -> dict:
if "internal" in action_versions:
cfg.setdefault("internal", action_versions["internal"])
return cfg
return {
parsed_version: action
for parsed_version, action in (
(
parsed_version,
self._parse_action(action_name, parsed_version, add_internal(cfg)),
)
for parsed_version, cfg in (
(parse_version(version), cfg)
for version, cfg in action_versions.items()
if version not in ["internal", "allow_roles", "authorize"]
)
)
if action
}
def _get_schema_references(self, s: Any) -> Set[str]:
refs = set()
if isinstance(s, dict):
for k, v in s.items():
if isinstance(v, six.string_types):
m = self.__jsonschema_ref_ex.match(v)
if m:
refs.add(m.group(1))
continue
elif k in ("oneOf", "anyOf") and isinstance(v, list):
refs.update(*map(self._get_schema_references, v))
refs.update(self._get_schema_references(v))
return refs
def _expand_schema_references_with_definitions(self, schema: dict, refs: set = None) -> set:
definitions = schema.get("definitions", {})
refs = refs if refs is not None else self._get_schema_references(schema)
required_refs = set(refs).difference(definitions)
if not required_refs:
return required_refs
if not required_refs.issubset(self.definitions):
raise ValueError("Unresolved references (%s)" % ", ".join(required_refs.difference(self.definitions)))
# update required refs with all sub requirements
last_required_refs = None
while last_required_refs != required_refs:
last_required_refs = required_refs.copy()
additional_refs = set(itertools.chain(*(self.definitions_refs.get(ref, []) for ref in required_refs)))
required_refs.update(additional_refs)
return required_refs
def _resolve_schema_references(self, schema: dict, refs: set = None) -> None:
definitions = schema.get("definitions", {})
definitions.update({k: v for k, v in self.definitions.items() if k in refs})
schema["definitions"] = definitions
def _parse_action(self, action_name: str, action_version: float, action_config: dict) -> Action:
data = self.default.copy()
data.update(action_config)
if not action_config.get("generate", True):
return None
definitions_keys = set()
for schema_key in ("request", "response"):
if schema_key in action_config:
try:
schema = action_config[schema_key]
refs = self._expand_schema_references_with_definitions(schema)
self._resolve_schema_references(schema, refs=refs)
definitions_keys.update(refs)
except ValueError as ex:
name = "%s.%s/%.1f/%s" % (
self.name,
action_name,
action_version,
schema_key,
)
raise ValueError("%s in %s" % (str(ex), name))
return Action(
name=action_name,
version=action_version,
definitions_keys=list(definitions_keys),
service=self.name,
**({key: value for key, value in data.items() if key in attr.fields_dict(Action)})
)
| Service |
python | Pylons__pyramid | src/pyramid/config/views.py | {
"start": 85032,
"end": 85483
} | class ____:
def __init__(
self, view, registry, package, predicates, exception_only, options
):
self.original_view = view
self.registry = registry
self.package = package
self.predicates = predicates or []
self.options = options or {}
self.exception_only = exception_only
@reify
def settings(self):
return self.registry.settings
@implementer(IStaticURLInfo)
| ViewDeriverInfo |
python | pallets__quart | src/quart/asgi.py | {
"start": 6686,
"end": 13769
} | class ____:
def __init__(self, app: Quart, scope: WebsocketScope) -> None:
self.app = app
self.scope = scope
self.queue: asyncio.Queue = asyncio.Queue()
self._accepted = False
self._closed = False
async def __call__(
self, receive: ASGIReceiveCallable, send: ASGISendCallable
) -> None:
websocket = self._create_websocket_from_scope(send)
receiver_task = asyncio.ensure_future(self.handle_messages(receive))
handler_task = asyncio.ensure_future(self.handle_websocket(websocket, send))
done, pending = await asyncio.wait(
[handler_task, receiver_task], return_when=asyncio.FIRST_COMPLETED
)
await cancel_tasks(pending)
raise_task_exceptions(done)
async def handle_messages(self, receive: ASGIReceiveCallable) -> None:
while True:
event = await receive()
if event["type"] == "websocket.receive":
message = event.get("bytes") or event["text"]
await websocket_received.send_async(message)
await self.queue.put(message)
elif event["type"] == "websocket.disconnect":
return
def _create_websocket_from_scope(self, send: ASGISendCallable) -> Websocket:
headers = Headers()
headers["Remote-Addr"] = (self.scope.get("client") or ["<local>"])[0]
for name, value in self.scope["headers"]:
headers.add(name.decode("latin1").title(), value.decode("latin1"))
path = self.scope["path"]
path = path if path[0] == "/" else urlparse(path).path
root_path = self.scope.get("root_path", "")
if root_path != "":
try:
path = path.split(root_path, 1)[1]
path = " " if path == "" else path
except IndexError:
path = " " # Invalid in paths, hence will result in 404
return self.app.websocket_class(
path,
self.scope["query_string"],
self.scope["scheme"],
headers,
self.scope.get("root_path", ""),
self.scope.get("http_version", "1.1"),
list(self.scope.get("subprotocols", [])),
self.queue.get,
partial(self.send_data, send),
partial(self.accept_connection, send),
partial(self.close_connection, send),
scope=self.scope,
)
async def handle_websocket(
self, websocket: Websocket, send: ASGISendCallable
) -> None:
try:
response = await self.app.handle_websocket(websocket)
except Exception as error:
response = await _handle_exception(self.app, error)
if response is not None and not self._accepted:
extensions = self.scope.get("extensions", {}) or {}
if "websocket.http.response" in extensions:
headers = [
(key.lower().encode(), value.encode())
for key, value in response.headers.items()
]
await send(
cast(
WebsocketResponseStartEvent,
{
"type": "websocket.http.response.start",
"status": response.status_code,
"headers": headers,
},
)
)
if isinstance(response, WerkzeugResponse):
for data in response.response:
await send(
cast(
WebsocketResponseBodyEvent,
{
"type": "websocket.http.response.body",
"body": data,
"more_body": True,
},
)
)
elif isinstance(response, Response):
async with response.response as body:
async for data in body:
await send(
cast(
WebsocketResponseBodyEvent,
{
"type": "websocket.http.response.body",
"body": data,
"more_body": True,
},
)
)
await send(
cast(
WebsocketResponseBodyEvent,
{
"type": "websocket.http.response.body",
"body": b"",
"more_body": False,
},
)
)
elif not self._closed:
await send(
cast(WebsocketCloseEvent, {"type": "websocket.close", "code": 1000})
)
elif self._accepted and not self._closed:
await send(
cast(WebsocketCloseEvent, {"type": "websocket.close", "code": 1000})
)
async def send_data(self, send: ASGISendCallable, data: AnyStr) -> None:
if isinstance(data, str):
await send({"type": "websocket.send", "bytes": None, "text": data})
else:
await send({"type": "websocket.send", "bytes": data, "text": None})
await websocket_sent.send_async(data)
async def accept_connection(
self, send: ASGISendCallable, headers: Headers, subprotocol: str | None
) -> None:
if not self._accepted:
message: WebsocketAcceptEvent = {
"headers": [],
"subprotocol": subprotocol,
"type": "websocket.accept",
}
spec_version = _convert_version(
self.scope.get("asgi", {}).get("spec_version", "2.0")
)
if spec_version > [2, 0]:
message["headers"] = encode_headers(headers)
elif headers:
warnings.warn(
"The ASGI Server does not support accept headers, headers not sent",
stacklevel=1,
)
self._accepted = True
await send(message)
async def close_connection(
self, send: ASGISendCallable, code: int, reason: str
) -> None:
if self._closed:
raise RuntimeError("Cannot close websocket multiple times")
spec_version = _convert_version(
self.scope.get("asgi", {}).get("spec_version", "2.0")
)
if spec_version >= [2, 3]:
await send({"type": "websocket.close", "code": code, "reason": reason})
else:
await send({"type": "websocket.close", "code": code}) # type: ignore
self._closed = True
| ASGIWebsocketConnection |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 8629,
"end": 8901
} | class ____:
def f(self, b, c):
local1, local2 = 3, 4
#foo
#? 11 text {'new_name': 'ab', 'until_line': 7, 'until_column': 29}
return local1 & glob1 & b
# bar
local2
# ++++++++++++++++++++++++++++++++++++++++++++++++++
glob1 = 1
| X |
python | cython__cython | Cython/Debugger/Tests/test_libcython_in_gdb.py | {
"start": 9069,
"end": 9433
} | class ____(DebugStepperTestCase):
def test_cython_next(self):
self.break_and_run('c = 2')
lines = (
'int(10)',
'puts("spam")',
'os.path.join("foo", "bar")',
'some_c_function()',
)
for line in lines:
gdb.execute('cy next')
self.lineno_equals(line)
| TestNext |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/result.py | {
"start": 29128,
"end": 54271
} | class ____(_WithKeys, ResultInternal[Row[Unpack[_Ts]]]):
"""Represent a set of database results.
.. versionadded:: 1.4 The :class:`_engine.Result` object provides a
completely updated usage model and calling facade for SQLAlchemy
Core and SQLAlchemy ORM. In Core, it forms the basis of the
:class:`_engine.CursorResult` object which replaces the previous
:class:`_engine.ResultProxy` interface. When using the ORM, a
higher level object called :class:`_engine.ChunkedIteratorResult`
is normally used.
.. note:: In SQLAlchemy 1.4 and above, this object is
used for ORM results returned by :meth:`_orm.Session.execute`, which can
yield instances of ORM mapped objects either individually or within
tuple-like rows. Note that the :class:`_engine.Result` object does not
deduplicate instances or rows automatically as is the case with the
legacy :class:`_orm.Query` object. For in-Python de-duplication of
instances or rows, use the :meth:`_engine.Result.unique` modifier
method.
.. seealso::
:ref:`tutorial_fetching_rows` - in the :doc:`/tutorial/index`
"""
__slots__ = ("_metadata", "__dict__")
_row_logging_fn: Optional[
Callable[[Row[Unpack[TupleAny]]], Row[Unpack[TupleAny]]]
] = None
_source_supports_scalars: bool = False
_yield_per: Optional[int] = None
_attributes: util.immutabledict[Any, Any] = util.immutabledict()
def __init__(self, cursor_metadata: ResultMetaData):
self._metadata = cursor_metadata
def __enter__(self) -> Self:
return self
def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
self.close()
def close(self) -> None:
"""close this :class:`_engine.Result`.
The behavior of this method is implementation specific, and is
not implemented by default. The method should generally end
the resources in use by the result object and also cause any
subsequent iteration or row fetching to raise
:class:`.ResourceClosedError`.
.. versionadded:: 1.4.27 - ``.close()`` was previously not generally
available for all :class:`_engine.Result` classes, instead only
being available on the :class:`_engine.CursorResult` returned for
Core statement executions. As most other result objects, namely the
ones used by the ORM, are proxying a :class:`_engine.CursorResult`
in any case, this allows the underlying cursor result to be closed
from the outside facade for the case when the ORM query is using
the ``yield_per`` execution option where it does not immediately
exhaust and autoclose the database cursor.
"""
self._soft_close(hard=True)
@property
def _soft_closed(self) -> bool:
raise NotImplementedError()
@property
def closed(self) -> bool:
"""return ``True`` if this :class:`_engine.Result` reports .closed
.. versionadded:: 1.4.43
"""
raise NotImplementedError()
@_generative
def yield_per(self, num: int) -> Self:
"""Configure the row-fetching strategy to fetch ``num`` rows at a time.
This impacts the underlying behavior of the result when iterating over
the result object, or otherwise making use of methods such as
:meth:`_engine.Result.fetchone` that return one row at a time. Data
from the underlying cursor or other data source will be buffered up to
this many rows in memory, and the buffered collection will then be
yielded out one row at a time or as many rows are requested. Each time
the buffer clears, it will be refreshed to this many rows or as many
rows remain if fewer remain.
The :meth:`_engine.Result.yield_per` method is generally used in
conjunction with the
:paramref:`_engine.Connection.execution_options.stream_results`
execution option, which will allow the database dialect in use to make
use of a server side cursor, if the DBAPI supports a specific "server
side cursor" mode separate from its default mode of operation.
.. tip::
Consider using the
:paramref:`_engine.Connection.execution_options.yield_per`
execution option, which will simultaneously set
:paramref:`_engine.Connection.execution_options.stream_results`
to ensure the use of server side cursors, as well as automatically
invoke the :meth:`_engine.Result.yield_per` method to establish
a fixed row buffer size at once.
The :paramref:`_engine.Connection.execution_options.yield_per`
execution option is available for ORM operations, with
:class:`_orm.Session`-oriented use described at
:ref:`orm_queryguide_yield_per`. The Core-only version which works
with :class:`_engine.Connection` is new as of SQLAlchemy 1.4.40.
.. versionadded:: 1.4
:param num: number of rows to fetch each time the buffer is refilled.
If set to a value below 1, fetches all rows for the next buffer.
.. seealso::
:ref:`engine_stream_results` - describes Core behavior for
:meth:`_engine.Result.yield_per`
:ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel`
"""
self._yield_per = num
return self
@_generative
def unique(self, strategy: Optional[_UniqueFilterType] = None) -> Self:
"""Apply unique filtering to the objects returned by this
:class:`_engine.Result`.
When this filter is applied with no arguments, the rows or objects
returned will filtered such that each row is returned uniquely. The
algorithm used to determine this uniqueness is by default the Python
hashing identity of the whole tuple. In some cases a specialized
per-entity hashing scheme may be used, such as when using the ORM, a
scheme is applied which works against the primary key identity of
returned objects.
The unique filter is applied **after all other filters**, which means
if the columns returned have been refined using a method such as the
:meth:`_engine.Result.columns` or :meth:`_engine.Result.scalars`
method, the uniquing is applied to **only the column or columns
returned**. This occurs regardless of the order in which these
methods have been called upon the :class:`_engine.Result` object.
The unique filter also changes the calculus used for methods like
:meth:`_engine.Result.fetchmany` and :meth:`_engine.Result.partitions`.
When using :meth:`_engine.Result.unique`, these methods will continue
to yield the number of rows or objects requested, after uniquing
has been applied. However, this necessarily impacts the buffering
behavior of the underlying cursor or datasource, such that multiple
underlying calls to ``cursor.fetchmany()`` may be necessary in order
to accumulate enough objects in order to provide a unique collection
of the requested size.
:param strategy: a callable that will be applied to rows or objects
being iterated, which should return an object that represents the
unique value of the row. A Python ``set()`` is used to store
these identities. If not passed, a default uniqueness strategy
is used which may have been assembled by the source of this
:class:`_engine.Result` object.
"""
self._unique_filter_state = (set(), strategy)
return self
def columns(self, *col_expressions: _KeyIndexType) -> Self:
r"""Establish the columns that should be returned in each row.
This method may be used to limit the columns returned as well
as to reorder them. The given list of expressions are normally
a series of integers or string key names. They may also be
appropriate :class:`.ColumnElement` objects which correspond to
a given statement construct.
.. versionchanged:: 2.0 Due to a bug in 1.4, the
:meth:`_engine.Result.columns` method had an incorrect behavior
where calling upon the method with just one index would cause the
:class:`_engine.Result` object to yield scalar values rather than
:class:`_engine.Row` objects. In version 2.0, this behavior
has been corrected such that calling upon
:meth:`_engine.Result.columns` with a single index will
produce a :class:`_engine.Result` object that continues
to yield :class:`_engine.Row` objects, which include
only a single column.
E.g.::
statement = select(table.c.x, table.c.y, table.c.z)
result = connection.execute(statement)
for z, y in result.columns("z", "y"):
...
Example of using the column objects from the statement itself::
for z, y in result.columns(
statement.selected_columns.c.z, statement.selected_columns.c.y
):
...
.. versionadded:: 1.4
:param \*col_expressions: indicates columns to be returned. Elements
may be integer row indexes, string column names, or appropriate
:class:`.ColumnElement` objects corresponding to a select construct.
:return: this :class:`_engine.Result` object with the modifications
given.
"""
return self._column_slices(col_expressions)
@overload
def scalars(self: Result[_T, Unpack[TupleAny]]) -> ScalarResult[_T]: ...
@overload
def scalars(
self: Result[_T, Unpack[TupleAny]], index: Literal[0]
) -> ScalarResult[_T]: ...
@overload
def scalars(self, index: _KeyIndexType = 0) -> ScalarResult[Any]: ...
def scalars(self, index: _KeyIndexType = 0) -> ScalarResult[Any]:
"""Return a :class:`_engine.ScalarResult` filtering object which
will return single elements rather than :class:`_row.Row` objects.
E.g.::
>>> result = conn.execute(text("select int_id from table"))
>>> result.scalars().all()
[1, 2, 3]
When results are fetched from the :class:`_engine.ScalarResult`
filtering object, the single column-row that would be returned by the
:class:`_engine.Result` is instead returned as the column's value.
.. versionadded:: 1.4
:param index: integer or row key indicating the column to be fetched
from each row, defaults to ``0`` indicating the first column.
:return: a new :class:`_engine.ScalarResult` filtering object referring
to this :class:`_engine.Result` object.
"""
return ScalarResult(self, index)
def _getter(
self, key: _KeyIndexType, raiseerr: bool = True
) -> Optional[Callable[[Row[Unpack[TupleAny]]], Any]]:
"""return a callable that will retrieve the given key from a
:class:`_engine.Row`.
"""
if self._source_supports_scalars:
raise NotImplementedError(
"can't use this function in 'only scalars' mode"
)
return self._metadata._getter(key, raiseerr)
def _tuple_getter(self, keys: Sequence[_KeyIndexType]) -> _TupleGetterType:
"""return a callable that will retrieve the given keys from a
:class:`_engine.Row`.
"""
if self._source_supports_scalars:
raise NotImplementedError(
"can't use this function in 'only scalars' mode"
)
return self._metadata._row_as_tuple_getter(keys)
def mappings(self) -> MappingResult:
"""Apply a mappings filter to returned rows, returning an instance of
:class:`_engine.MappingResult`.
When this filter is applied, fetching rows will return
:class:`_engine.RowMapping` objects instead of :class:`_engine.Row`
objects.
.. versionadded:: 1.4
:return: a new :class:`_engine.MappingResult` filtering object
referring to this :class:`_engine.Result` object.
"""
return MappingResult(self)
@property
@deprecated(
"2.1.0",
"The :attr:`.Result.t` method is deprecated, :class:`.Row` "
"now behaves like a tuple and can unpack types directly.",
)
def t(self) -> TupleResult[Tuple[Unpack[_Ts]]]:
"""Apply a "typed tuple" typing filter to returned rows.
The :attr:`_engine.Result.t` attribute is a synonym for
calling the :meth:`_engine.Result.tuples` method.
.. versionadded:: 2.0
.. seealso::
:ref:`change_10635` - describes a migration path from this
workaround for SQLAlchemy 2.1.
"""
return self # type: ignore
@deprecated(
"2.1.0",
"The :meth:`.Result.tuples` method is deprecated, :class:`.Row` "
"now behaves like a tuple and can unpack types directly.",
)
def tuples(self) -> TupleResult[Tuple[Unpack[_Ts]]]:
"""Apply a "typed tuple" typing filter to returned rows.
This method returns the same :class:`_engine.Result` object
at runtime,
however annotates as returning a :class:`_engine.TupleResult` object
that will indicate to :pep:`484` typing tools that plain typed
``Tuple`` instances are returned rather than rows. This allows
tuple unpacking and ``__getitem__`` access of :class:`_engine.Row`
objects to by typed, for those cases where the statement invoked
itself included typing information.
.. versionadded:: 2.0
:return: the :class:`_engine.TupleResult` type at typing time.
.. seealso::
:ref:`change_10635` - describes a migration path from this
workaround for SQLAlchemy 2.1.
:attr:`_engine.Result.t` - shorter synonym
:attr:`_engine.Row._t` - :class:`_engine.Row` version
"""
return self # type: ignore
def _raw_row_iterator(self) -> Iterator[_RowData]:
"""Return a safe iterator that yields raw row data.
This is used by the :meth:`_engine.Result.merge` method
to merge multiple compatible results together.
"""
raise NotImplementedError()
def __iter__(self) -> Iterator[Row[Unpack[_Ts]]]:
return self._iter_impl()
def __next__(self) -> Row[Unpack[_Ts]]:
return self._next_impl()
def partitions(
self, size: Optional[int] = None
) -> Iterator[Sequence[Row[Unpack[_Ts]]]]:
"""Iterate through sub-lists of rows of the size given.
Each list will be of the size given, excluding the last list to
be yielded, which may have a small number of rows. No empty
lists will be yielded.
The result object is automatically closed when the iterator
is fully consumed.
Note that the backend driver will usually buffer the entire result
ahead of time unless the
:paramref:`.Connection.execution_options.stream_results` execution
option is used indicating that the driver should not pre-buffer
results, if possible. Not all drivers support this option and
the option is silently ignored for those who do not.
When using the ORM, the :meth:`_engine.Result.partitions` method
is typically more effective from a memory perspective when it is
combined with use of the
:ref:`yield_per execution option <orm_queryguide_yield_per>`,
which instructs both the DBAPI driver to use server side cursors,
if available, as well as instructs the ORM loading internals to only
build a certain amount of ORM objects from a result at a time before
yielding them out.
.. versionadded:: 1.4
:param size: indicate the maximum number of rows to be present
in each list yielded. If None, makes use of the value set by
the :meth:`_engine.Result.yield_per`, method, if it were called,
or the :paramref:`_engine.Connection.execution_options.yield_per`
execution option, which is equivalent in this regard. If
yield_per weren't set, it makes use of the
:meth:`_engine.Result.fetchmany` default, which may be backend
specific and not well defined.
:return: iterator of lists
.. seealso::
:ref:`engine_stream_results`
:ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel`
"""
getter = self._manyrow_getter
while True:
partition = getter(self, size)
if partition:
yield partition
else:
break
def fetchall(self) -> Sequence[Row[Unpack[_Ts]]]:
"""A synonym for the :meth:`_engine.Result.all` method."""
return self._allrows()
def fetchone(self) -> Optional[Row[Unpack[_Ts]]]:
"""Fetch one row.
When all rows are exhausted, returns None.
This method is provided for backwards compatibility with
SQLAlchemy 1.x.x.
To fetch the first row of a result only, use the
:meth:`_engine.Result.first` method. To iterate through all
rows, iterate the :class:`_engine.Result` object directly.
:return: a :class:`_engine.Row` object if no filters are applied,
or ``None`` if no rows remain.
"""
row = self._onerow_getter(self)
if row is _NO_ROW:
return None
else:
return row
def fetchmany(
self, size: Optional[int] = None
) -> Sequence[Row[Unpack[_Ts]]]:
"""Fetch many rows.
When all rows are exhausted, returns an empty sequence.
This method is provided for backwards compatibility with
SQLAlchemy 1.x.x.
To fetch rows in groups, use the :meth:`_engine.Result.partitions`
method.
:return: a sequence of :class:`_engine.Row` objects.
.. seealso::
:meth:`_engine.Result.partitions`
"""
return self._manyrow_getter(self, size)
def all(self) -> Sequence[Row[Unpack[_Ts]]]:
"""Return all rows in a sequence.
Closes the result set after invocation. Subsequent invocations
will return an empty sequence.
.. versionadded:: 1.4
:return: a sequence of :class:`_engine.Row` objects.
.. seealso::
:ref:`engine_stream_results` - How to stream a large result set
without loading it completely in python.
"""
return self._allrows()
def first(self) -> Optional[Row[Unpack[_Ts]]]:
"""Fetch the first row or ``None`` if no row is present.
Closes the result set and discards remaining rows.
.. note:: This method returns one **row**, e.g. tuple, by default.
To return exactly one single scalar value, that is, the first
column of the first row, use the
:meth:`_engine.Result.scalar` method,
or combine :meth:`_engine.Result.scalars` and
:meth:`_engine.Result.first`.
Additionally, in contrast to the behavior of the legacy ORM
:meth:`_orm.Query.first` method, **no limit is applied** to the
SQL query which was invoked to produce this
:class:`_engine.Result`;
for a DBAPI driver that buffers results in memory before yielding
rows, all rows will be sent to the Python process and all but
the first row will be discarded.
.. seealso::
:ref:`migration_20_unify_select`
:return: a :class:`_engine.Row` object, or None
if no rows remain.
.. seealso::
:meth:`_engine.Result.scalar`
:meth:`_engine.Result.one`
"""
return self._only_one_row(
raise_for_second_row=False, raise_for_none=False, scalar=False
)
def one_or_none(self) -> Optional[Row[Unpack[_Ts]]]:
"""Return at most one result or raise an exception.
Returns ``None`` if the result has no rows.
Raises :class:`.MultipleResultsFound`
if multiple rows are returned.
.. versionadded:: 1.4
:return: The first :class:`_engine.Row` or ``None`` if no row
is available.
:raises: :class:`.MultipleResultsFound`
.. seealso::
:meth:`_engine.Result.first`
:meth:`_engine.Result.one`
"""
return self._only_one_row(
raise_for_second_row=True, raise_for_none=False, scalar=False
)
def scalar_one(self: Result[_T, Unpack[TupleAny]]) -> _T:
"""Return exactly one scalar result or raise an exception.
This is equivalent to calling :meth:`_engine.Result.scalars` and
then :meth:`_engine.ScalarResult.one`.
.. seealso::
:meth:`_engine.ScalarResult.one`
:meth:`_engine.Result.scalars`
"""
return self._only_one_row(
raise_for_second_row=True, raise_for_none=True, scalar=True
)
def scalar_one_or_none(self: Result[_T, Unpack[TupleAny]]) -> Optional[_T]:
"""Return exactly one scalar result or ``None``.
This is equivalent to calling :meth:`_engine.Result.scalars` and
then :meth:`_engine.ScalarResult.one_or_none`.
.. seealso::
:meth:`_engine.ScalarResult.one_or_none`
:meth:`_engine.Result.scalars`
"""
return self._only_one_row(
raise_for_second_row=True, raise_for_none=False, scalar=True
)
def one(self) -> Row[Unpack[_Ts]]:
"""Return exactly one row or raise an exception.
Raises :class:`_exc.NoResultFound` if the result returns no
rows, or :class:`_exc.MultipleResultsFound` if multiple rows
would be returned.
.. note:: This method returns one **row**, e.g. tuple, by default.
To return exactly one single scalar value, that is, the first
column of the first row, use the
:meth:`_engine.Result.scalar_one` method, or combine
:meth:`_engine.Result.scalars` and
:meth:`_engine.Result.one`.
.. versionadded:: 1.4
:return: The first :class:`_engine.Row`.
:raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound`
.. seealso::
:meth:`_engine.Result.first`
:meth:`_engine.Result.one_or_none`
:meth:`_engine.Result.scalar_one`
"""
return self._only_one_row(
raise_for_second_row=True, raise_for_none=True, scalar=False
)
def scalar(self: Result[_T, Unpack[TupleAny]]) -> Optional[_T]:
"""Fetch the first column of the first row, and close the result set.
Returns ``None`` if there are no rows to fetch.
No validation is performed to test if additional rows remain.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.CursorResult.close`
method will have been called.
:return: a Python scalar value, or ``None`` if no rows remain.
"""
return self._only_one_row(
raise_for_second_row=False, raise_for_none=False, scalar=True
)
def freeze(self) -> FrozenResult[Unpack[_Ts]]:
"""Return a callable object that will produce copies of this
:class:`_engine.Result` when invoked.
The callable object returned is an instance of
:class:`_engine.FrozenResult`.
This is used for result set caching. The method must be called
on the result when it has been unconsumed, and calling the method
will consume the result fully. When the :class:`_engine.FrozenResult`
is retrieved from a cache, it can be called any number of times where
it will produce a new :class:`_engine.Result` object each time
against its stored set of rows.
.. seealso::
:ref:`do_orm_execute_re_executing` - example usage within the
ORM to implement a result-set cache.
"""
return FrozenResult(self)
def merge(
self, *others: Result[Unpack[TupleAny]]
) -> MergedResult[Unpack[TupleAny]]:
"""Merge this :class:`_engine.Result` with other compatible result
objects.
The object returned is an instance of :class:`_engine.MergedResult`,
which will be composed of iterators from the given result
objects.
The new result will use the metadata from this result object.
The subsequent result objects must be against an identical
set of result / cursor metadata, otherwise the behavior is
undefined.
"""
return MergedResult(self._metadata, (self,) + others)
| Result |
python | readthedocs__readthedocs.org | readthedocs/embed/v3/views.py | {
"start": 1353,
"end": 16362
} | class ____(EmbedAPIMixin, CDNCacheTagsMixin, APIView):
# pylint: disable=line-too-long
"""
Embed a section of content from any Read the Docs page.
### Arguments
* url (with fragment) (required)
* doctool
* doctoolversion
* maincontent
### Example
GET https://readthedocs.org/api/v3/embed/?url=https://docs.readthedocs.io/en/latest/features.html%23full-text-search
""" # noqa
permission_classes = [HasEmbedAPIAccess, IsAuthorizedToGetContenFromVersion]
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
@property
def external(self):
# NOTE: ``readthedocs.core.unresolver.unresolve`` returns ``None`` when
# it can't find the project in our database
return self.unresolved_url is None
def _download_page_content(self, url):
# Sanitize the URL before requesting it
url = urlparse(url)._replace(fragment="", query="").geturl()
# TODO: sanitize the cache key just in case, maybe by hashing it
cache_key = f"embed-api-{url}"
cached_response = cache.get(cache_key)
if cached_response:
log.debug("Cached response.", url=url)
return cached_response
response = requests.get(url, timeout=settings.RTD_EMBED_API_DEFAULT_REQUEST_TIMEOUT)
if response.ok:
# NOTE: we use ``response.content`` to get its binary
# representation. Then ``selectolax`` is in charge to auto-detect
# its encoding. We trust more in selectolax for this than in requests.
cache.set(
cache_key,
response.content,
timeout=settings.RTD_EMBED_API_PAGE_CACHE_TIMEOUT,
)
return response.content
def _get_page_content_from_storage(self, project, version, filename):
storage_path = project.get_storage_path(
"html",
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
# Decode encoded URLs (e.g. convert %20 into a whitespace)
filename = urllib.parse.unquote(filename)
# If the filename starts with `/`, the join will fail,
# so we strip it before joining it.
relative_filename = filename.lstrip("/")
file_path = build_media_storage.join(
storage_path,
relative_filename,
)
tryfiles = [file_path, build_media_storage.join(file_path, "index.html")]
for tryfile in tryfiles:
try:
with build_media_storage.open(tryfile) as fd:
return fd.read()
except Exception: # noqa
log.warning("Unable to read file.", file_path=file_path)
return None
def _get_content_by_fragment(
self,
url,
fragment,
doctool,
doctoolversion,
selector,
):
if self.external:
page_content = self._download_page_content(url)
else:
project = self.unresolved_url.project
version = self.unresolved_url.version
filename = self.unresolved_url.filename
page_content = self._get_page_content_from_storage(project, version, filename)
return self._parse_based_on_doctool(
page_content,
fragment,
doctool,
doctoolversion,
selector,
)
def _find_main_node(self, html, selector):
if selector:
try:
return html.css_first(selector)
except ValueError:
log.warning("Invalid CSS selector provided.", selector=selector)
return None
main_node = html.css_first("[role=main]")
if main_node:
log.debug("Main node found. selector=[role=main]")
return main_node
main_node = html.css_first("main")
if main_node:
log.debug("Main node found. selector=main")
return main_node
first_header = html.body.css_first("h1")
if first_header:
log.debug("Main node found. selector=h1")
return first_header.parent
def _parse_based_on_doctool(
self,
page_content,
fragment,
doctool,
doctoolversion,
selector,
):
# pylint: disable=unused-argument disable=too-many-nested-blocks
if not page_content:
return
node = None
if fragment:
# NOTE: we use the `[id=]` selector because using `#{id}` requires
# escaping the selector since CSS does not support the same
# characters as the `id=` HTML attribute
# https://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
selector = f'[id="{fragment}"]'
try:
node = HTMLParser(page_content).css_first(selector)
except ValueError:
log.warning("Invalid CSS selector from fragment.", fragment=fragment)
node = None
else:
html = HTMLParser(page_content)
node = self._find_main_node(html, selector)
if not node:
return
if doctool == "sphinx":
# Handle manual reference special cases
# See https://github.com/readthedocs/sphinx-hoverxref/issues/199
if node.tag == "span" and not node.text():
if any(
[
# docutils <0.18
all(
[
node.parent.tag == "div",
"section" in node.parent.attributes.get("class", []),
]
),
# docutils >=0.18
all(
[
node.parent.tag == "section",
node.parent.attributes.get("id", None),
]
),
]
):
# Sphinx adds an empty ``<span id="my-reference"></span>``
# HTML tag when using manual references (``..
# _my-reference:``). Then, when users refer to it via
# ``:ref:`my-referece``` the API will return the empty
# span. If the parent node is a section, we have to return
# the parent node that will have the content expected.
# Structure:
# <section id="ref-section">
# <span id="ref-manual"></span>
# <h2>Ref Section<a class="headerlink" href="#ref-section">¶</a></h2>
# <p>This is a reference to
# <a class="reference internal" href="#ref-manual"><span>Ref Section</span></a>.
# </p>
# </section>
node = node.parent
# Handle ``dt`` special cases
if node.tag == "dt":
if any(
[
"glossary" in node.parent.attributes.get("class"),
"citation" in node.parent.attributes.get("class"),
]
):
# Sphinx HTML structure for term glossary puts the ``id`` in the
# ``dt`` element with the title of the term. In this case, we
# return the parent node which contains the definition list
# and remove all ``dt/dd`` that are not the requested one
# Structure:
# <dl class="glossary docutils">
# <dt id="term-definition">definition</dt>
# <dd>Text definition for the term</dd>
# ...
# </dl>
parent_node = node.parent
if "glossary" in node.parent.attributes.get("class"):
# iterate through child and next nodes
traverse = node.traverse()
iteration = 0
while iteration < 5:
next_node = next(traverse, None)
# TODO: Do we need to support terms with missing descriptions?
# This will not produce correct results in this case.
# Stop at the next 'dd' node, which is the description
if iteration >= 5 or (next_node and next_node.tag == "dd"):
break
iteration += 1
elif "citation" in node.parent.attributes.get("class"):
next_node = node.next.next
# Iterate over all the siblings (``.iter()``) of the parent
# node and remove ``dt`` and ``dd`` that are not the ones
# we are looking for. Then return the parent node as
# result.
#
# Note that ``.iter()`` returns a generator and we modify
# the HTML in-place, so we have to convert it to a list
# before removing elements. Otherwise we break the
# iteration before completing it
for n in list(parent_node.iter()): # pylint: disable=invalid-name
if n not in (node, next_node):
n.remove()
node = parent_node
else:
# Sphinx HTML structure for definition list puts the ``id``
# the ``dt`` element, instead of the ``dl``. This makes
# the backend to return just the title of the definition. If we
# detect this case, we return the parent with the whole ``dl`` tag
# Structure:
# <dl class="confval">
# <dt id="confval-config">
# <code class="descname">config</code>
# <a class="headerlink" href="#confval-config">¶</a></dt>
# <dd><p>Text with a description</p></dd>
# </dl>
node = node.parent
return node.html
def get(self, request): # noqa
url = request.GET.get("url")
doctool = request.GET.get("doctool")
doctoolversion = request.GET.get("doctoolversion")
selector = request.GET.get("maincontent")
if not url:
return Response(
{"error": ('Invalid arguments. Please provide "url".')},
status=status.HTTP_400_BAD_REQUEST,
)
parsed_url = urlparse(url)
domain = parsed_url.netloc
if not domain or not parsed_url.scheme:
return Response(
{"error": (f"The URL requested is malformed. url={url}")},
status=status.HTTP_400_BAD_REQUEST,
)
if self.external:
for allowed_domain in settings.RTD_EMBED_API_EXTERNAL_DOMAINS:
if re.match(allowed_domain, domain):
break
else:
log.info("Domain not allowed.", domain=domain, url=url)
return Response(
{"error": (f"External domain not allowed. domain={domain}")},
status=status.HTTP_400_BAD_REQUEST,
)
# Check rate-limit for this particular domain
cache_key = f"embed-api-{domain}"
cache.get_or_set(cache_key, 0, timeout=settings.RTD_EMBED_API_DOMAIN_RATE_LIMIT_TIMEOUT)
cache.incr(cache_key)
if cache.get(cache_key) > settings.RTD_EMBED_API_DOMAIN_RATE_LIMIT:
log.warning("Too many requests for this domain.", domain=domain)
return Response(
{"error": (f"Too many requests for this domain. domain={domain}")},
status=status.HTTP_429_TOO_MANY_REQUESTS,
)
# NOTE: we could validate the fragment if we want. It must contain at
# least one character, cannot start with a number, and must not contain
# whitespaces (spaces, tabs, etc.).
fragment = parsed_url.fragment
try:
content_requested = self._get_content_by_fragment(
url,
fragment,
doctool,
doctoolversion,
selector,
)
except requests.exceptions.TooManyRedirects:
log.exception("Too many redirects.", url=url)
return Response(
{"error": (f"The URL requested generates too many redirects. url={url}")},
# TODO: review these status codes to find out which on is better here
# 400 Bad Request
# 502 Bad Gateway
# 503 Service Unavailable
status=status.HTTP_400_BAD_REQUEST,
)
except Exception: # noqa
log.exception("There was an error reading the URL requested.", url=url)
return Response(
{"error": (f"There was an error reading the URL requested. url={url}")},
status=status.HTTP_400_BAD_REQUEST,
)
if not content_requested:
log.warning(
"Identifier not found.",
url=url,
fragment=fragment,
maincontent=selector,
)
return Response(
{
"error": (
"Can't find content for section: "
f"url={url} fragment={fragment} maincontent={selector}"
)
},
status=status.HTTP_404_NOT_FOUND,
)
# Sanitize the URL before requesting it
sanitized_url = urlparse(url)._replace(fragment="", query="").geturl()
# Make links from the content to be absolute
content = clean_references(
content_requested,
sanitized_url,
html_raw_response=True,
)
response = {
"url": url,
"fragment": fragment if fragment else None,
"content": content,
"external": self.external,
}
log.info(
"EmbedAPI successful response.",
project_slug=self.unresolved_url.project.slug if not self.external else None,
domain=domain if self.external else None,
doctool=doctool,
doctoolversion=doctoolversion,
url=url,
referer=request.headers.get("Referer"),
external=self.external,
hoverxref_version=request.headers.get("X-Hoverxref-Version"),
)
return Response(response)
| EmbedAPIBase |
python | celery__celery | celery/utils/log.py | {
"start": 5131,
"end": 8756
} | class ____:
"""Forward file object to :class:`logging.Logger` instance.
Arguments:
logger (~logging.Logger): Logger instance to forward to.
loglevel (int, str): Log level to use when logging messages.
"""
mode = 'w'
name = None
closed = False
loglevel = logging.ERROR
_thread = threading.local()
def __init__(self, logger, loglevel=None):
# pylint: disable=redefined-outer-name
# Note that the logger global is redefined here, be careful changing.
self.logger = logger
self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
self._safewrap_handlers()
def _safewrap_handlers(self):
# Make the logger handlers dump internal errors to
# :data:`sys.__stderr__` instead of :data:`sys.stderr` to circumvent
# infinite loops.
def wrap_handler(handler): # pragma: no cover
class WithSafeHandleError(logging.Handler):
def handleError(self, record):
try:
traceback.print_exc(None, sys.__stderr__)
except OSError:
pass # see python issue 5971
handler.handleError = WithSafeHandleError().handleError
return [wrap_handler(h) for h in self.logger.handlers]
def write(self, data):
# type: (AnyStr) -> int
"""Write message to logging object."""
if _in_sighandler:
safe_data = safe_str(data)
print(safe_data, file=sys.__stderr__)
return len(safe_data)
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
return 0
if data and not self.closed:
self._thread.recurse_protection = True
try:
safe_data = safe_str(data).rstrip('\n')
if safe_data:
self.logger.log(self.loglevel, safe_data)
return len(safe_data)
finally:
self._thread.recurse_protection = False
return 0
def writelines(self, sequence):
# type: (Sequence[str]) -> None
"""Write list of strings to file.
The sequence can be any iterable object producing strings.
This is equivalent to calling :meth:`write` for each string.
"""
for part in sequence:
self.write(part)
def flush(self):
# This object is not buffered so any :meth:`flush`
# requests are ignored.
pass
def close(self):
# when the object is closed, no write requests are
# forwarded to the logging object anymore.
self.closed = True
def isatty(self):
"""Here for file support."""
return False
def get_multiprocessing_logger():
"""Return the multiprocessing logger."""
try:
from billiard import util
except ImportError:
pass
else:
return util.get_logger()
def reset_multiprocessing_logger():
"""Reset multiprocessing logging setup."""
try:
from billiard import util
except ImportError:
pass
else:
if hasattr(util, '_logger'): # pragma: no cover
util._logger = None
def current_process():
try:
from billiard import process
except ImportError:
pass
else:
return process.current_process()
def current_process_index(base=1):
index = getattr(current_process(), 'index', None)
return index + base if index is not None else index
| LoggingProxy |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 3143,
"end": 3213
} | class ____[T1 = str, *Ts1 = Unpack[tuple[T1, T2]], T2 = T1]: ...
| ClassTB |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 93206,
"end": 106158
} | class ____(GoogleCloudBaseOperator, _BigQueryInsertJobOperatorOpenLineageMixin):
"""
Execute a BigQuery job.
Waits for the job to complete and returns job id.
This operator work in the following way:
- it calculates a unique hash of the job using job's configuration or uuid if ``force_rerun`` is True
- creates ``job_id`` in form of
``[provided_job_id | airflow_{dag_id}_{task_id}_{exec_date}]_{uniqueness_suffix}``
- submits a BigQuery job using the ``job_id``
- if job with given id already exists then it tries to reattach to the job if its not done and its
state is in ``reattach_states``. If the job is done the operator will raise ``AirflowException``.
Using ``force_rerun`` will submit a new job every time without attaching to already existing ones.
For job definition see here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryInsertJobOperator`
:param configuration: The configuration parameter maps directly to BigQuery's
configuration field in the job object. For more details see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
unless ``force_rerun`` is True.
The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
dashes (-). The maximum length is 1,024 characters. If not provided then uuid will
be generated.
:param force_rerun: If True then operator will use hash of uuid as job id suffix
:param reattach_states: Set of BigQuery job's states in case of which we should reattach
to the job. Should be other than final states.
:param project_id: Google Cloud Project where the job is running
:param location: location the job is running
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param cancel_on_kill: Flag which indicates whether cancel the hook's job or not, when on_kill is called
:param result_retry: How to retry the `result` call that retrieves rows
:param result_timeout: The number of seconds to wait for `result` method before using `result_retry`
:param deferrable: Run operator in the deferrable mode
:param poll_interval: (Deferrable mode only) polling period in seconds to check for the status of job.
Defaults to 4 seconds.
"""
template_fields: Sequence[str] = (
"configuration",
"job_id",
"gcp_conn_id",
"impersonation_chain",
"project_id",
)
template_ext: Sequence[str] = (
".json",
".sql",
)
template_fields_renderers = {"configuration": "json", "configuration.query.query": "sql"}
ui_color = BigQueryUIColors.QUERY.value
operator_extra_links = (BigQueryTableLink(), BigQueryJobDetailLink())
def __init__(
self,
configuration: dict[str, Any],
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
job_id: str | None = None,
force_rerun: bool = True,
reattach_states: set[str] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
cancel_on_kill: bool = True,
result_retry: Retry = DEFAULT_RETRY,
result_timeout: float | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.configuration = configuration
self.location = location
self.job_id = job_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.force_rerun = force_rerun
self.reattach_states: set[str] = reattach_states or set()
self.impersonation_chain = impersonation_chain
self.cancel_on_kill = cancel_on_kill
self.result_retry = result_retry
self.result_timeout = result_timeout
self.hook: BigQueryHook | None = None
self.deferrable = deferrable
self.poll_interval = poll_interval
@cached_property
def sql(self) -> str | None:
try:
return self.configuration["query"]["query"]
except KeyError:
return None
def prepare_template(self) -> None:
# If .json is passed then we have to read the file
if isinstance(self.configuration, str) and self.configuration.endswith(".json"):
with open(self.configuration) as file:
self.configuration = json.loads(file.read())
def _add_job_labels(self) -> None:
dag_label = self.dag_id.lower()
task_label = self.task_id.lower().replace(".", "-")
if LABEL_REGEX.match(dag_label) and LABEL_REGEX.match(task_label):
automatic_labels = {"airflow-dag": dag_label, "airflow-task": task_label}
if isinstance(self.configuration.get("labels"), dict):
self.configuration["labels"].update(automatic_labels)
elif "labels" not in self.configuration:
self.configuration["labels"] = automatic_labels
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
# Annotate the job with dag and task id labels
self._add_job_labels()
# Submit a new job without waiting for it to complete.
return hook.insert_job(
configuration=self.configuration,
project_id=self.project_id,
location=self.location,
job_id=job_id,
timeout=self.result_timeout,
retry=self.result_retry,
nowait=True,
)
def _handle_job_error(self, job: BigQueryJob | UnknownJob) -> None:
self.log.info("Job %s is completed. Checking the job status", self.job_id)
# Log any transient errors encountered during the job execution
for error in job.errors or []:
self.log.error("BigQuery Job Error: %s", error)
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
# Check the final state.
if job.state != "DONE":
raise AirflowException(f"Job failed with state: {job.state}")
def execute(self, context: Any):
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook = hook
if self.project_id is None:
self.project_id = hook.project_id
self.job_id = hook.generate_job_id(
job_id=self.job_id,
dag_id=self.dag_id,
task_id=self.task_id,
logical_date=None,
configuration=self.configuration,
run_after=hook.get_run_after_or_logical_date(context),
force_rerun=self.force_rerun,
)
try:
self.log.info("Executing: %s'", self.configuration)
# Create a job
if self.job_id is None:
raise ValueError("job_id cannot be None")
job: BigQueryJob | UnknownJob = self._submit_job(hook, self.job_id)
except Conflict:
# If the job already exists retrieve it
job = hook.get_job(
project_id=self.project_id,
location=self.location,
job_id=self.job_id,
)
if job.state not in self.reattach_states:
# Same job configuration, so we need force_rerun
raise AirflowException(
f"Job with id: {self.job_id} already exists and is in {job.state} state. If you "
f"want to force rerun it consider setting `force_rerun=True`."
f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
)
# Job already reached state DONE
if job.state == "DONE":
raise AirflowException("Job is already in state DONE. Can not reattach to this job.")
# We are reattaching to a job
self.log.info("Reattaching to existing Job in state %s", job.state)
self._handle_job_error(job)
job_types = {
LoadJob._JOB_TYPE: ["sourceTable", "destinationTable"],
CopyJob._JOB_TYPE: ["sourceTable", "destinationTable"],
ExtractJob._JOB_TYPE: ["sourceTable"],
QueryJob._JOB_TYPE: ["destinationTable"],
}
if self.project_id:
for job_type, tables_prop in job_types.items():
job_configuration = job.to_api_repr()["configuration"]
if job_type in job_configuration:
for table_prop in tables_prop:
if table_prop in job_configuration[job_type]:
table = job_configuration[job_type][table_prop]
persist_kwargs = {
"context": context,
"project_id": self.project_id,
"table_id": table,
}
if not isinstance(table, str):
persist_kwargs["table_id"] = table["tableId"]
persist_kwargs["dataset_id"] = table["datasetId"]
persist_kwargs["project_id"] = table["projectId"]
BigQueryTableLink.persist(**persist_kwargs)
self.job_id = job.job_id
if self.project_id:
job_id_path = convert_job_id(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
context["ti"].xcom_push(key="job_id_path", value=job_id_path)
persist_kwargs = {
"context": context,
"project_id": self.project_id,
"location": self.location,
"job_id": self.job_id,
}
BigQueryJobDetailLink.persist(**persist_kwargs)
# Wait for the job to complete
if not self.deferrable:
job.result(timeout=self.result_timeout, retry=self.result_retry)
self._handle_job_error(job)
return self.job_id
if job.running():
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryInsertJobTrigger(
conn_id=self.gcp_conn_id,
job_id=self.job_id,
project_id=self.project_id,
location=self.location or hook.location,
poll_interval=self.poll_interval,
impersonation_chain=self.impersonation_chain,
cancel_on_kill=self.cancel_on_kill,
),
method_name="execute_complete",
)
self.log.info("Current state of job %s is %s", job.job_id, job.state)
self._handle_job_error(job)
return self.job_id
def execute_complete(self, context: Context, event: dict[str, Any]) -> str | None:
"""
Act as a callback for when the trigger fires.
This returns immediately. It relies on trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
# Save job_id as an attribute to be later used by listeners
self.job_id = event.get("job_id")
return self.job_id
def on_kill(self) -> None:
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job( # type: ignore[union-attr]
job_id=self.job_id, project_id=self.project_id, location=self.location
)
else:
self.log.info("Skipping to cancel job: %s:%s.%s", self.project_id, self.location, self.job_id)
| BigQueryInsertJobOperator |
python | google__jax | tests/pallas/gpu_ops_test.py | {
"start": 14125,
"end": 14988
} | class ____(PallasBaseTest):
def setUp(self):
super().setUp()
if jtu.test_device_matches(["cpu", "tpu"]):
self.skipTest("Works only on GPU")
@parameterized.product(
shape=[(1024, 125), (4, 1024, 125)],
dtype=[jnp.bfloat16, jnp.float16, jnp.float32]
)
def test_softmax(self, shape, dtype):
x = jax.random.normal(random.key(0), shape, dtype=dtype)
atol, rtol = {
jnp.bfloat16: (1e-2, 1e-4),
jnp.float16: (1e-2, 1e-4),
jnp.float32: (1e-7, 1e-6),
}[dtype]
# We upcast to float32 because NumPy <2.0 does not handle custom dtypes
# properly. See https://github.com/jax-ml/jax/issues/11014.
np.testing.assert_allclose(
softmax.softmax(x, axis=-1).astype(jnp.float32),
jax.nn.softmax(x, axis=-1).astype(jnp.float32),
atol=atol,
rtol=rtol,
)
| SoftmaxTest |
python | getsentry__sentry | tests/sentry/api/test_base.py | {
"start": 17050,
"end": 19976
} | class ____(APITestCase):
def test_serializes_params(self) -> None:
request = self.make_request(method="GET", path="/api/0/organizations/")
request.GET = QueryDict("member=1&cursor=foo")
endpoint = Endpoint()
result = endpoint.build_cursor_link(
request, "next", Cursor.from_string("1492107369532:0:0")
)
assert result == (
"<http://testserver/api/0/organizations/?member=1&cursor=1492107369532:0:0>;"
' rel="next"; results="false"; cursor="1492107369532:0:0"'
)
def test_preserves_ssl_proto(self) -> None:
request = self.make_request(method="GET", path="/api/0/organizations/", secure_scheme=True)
request.GET = QueryDict("member=1&cursor=foo")
endpoint = Endpoint()
with override_options({"system.url-prefix": "https://testserver"}):
result = endpoint.build_cursor_link(
request, "next", Cursor.from_string("1492107369532:0:0")
)
assert result == (
"<https://testserver/api/0/organizations/?member=1&cursor=1492107369532:0:0>;"
' rel="next"; results="false"; cursor="1492107369532:0:0"'
)
def test_handles_customer_domains(self) -> None:
request = self.make_request(
method="GET", path="/api/0/organizations/", secure_scheme=True, subdomain="bebe"
)
request.GET = QueryDict("member=1&cursor=foo")
endpoint = Endpoint()
with override_options(
{
"system.url-prefix": "https://testserver",
"system.organization-url-template": "https://{hostname}",
}
):
result = endpoint.build_cursor_link(
request, "next", Cursor.from_string("1492107369532:0:0")
)
assert result == (
"<https://bebe.testserver/api/0/organizations/?member=1&cursor=1492107369532:0:0>;"
' rel="next"; results="false"; cursor="1492107369532:0:0"'
)
def test_unicode_path(self) -> None:
request = self.make_request(method="GET", path="/api/0/organizations/üuuuu/")
endpoint = Endpoint()
result = endpoint.build_cursor_link(
request, "next", Cursor.from_string("1492107369532:0:0")
)
assert result == (
"<http://testserver/api/0/organizations/%C3%BCuuuu/?&cursor=1492107369532:0:0>;"
' rel="next"; results="false"; cursor="1492107369532:0:0"'
)
def test_encodes_url(self) -> None:
endpoint = Endpoint()
request = self.make_request(method="GET", path="/foo/bar/lol:what/")
result = endpoint.build_cursor_link(request, "next", cursor=Cursor(0, 0, 0))
assert (
result
== '<http://testserver/foo/bar/lol%3Awhat/?&cursor=0:0:0>; rel="next"; results="false"; cursor="0:0:0"'
)
| CursorGenerationTest |
python | getsentry__sentry | src/sentry/flags/providers.py | {
"start": 13047,
"end": 13210
} | class ____(serializers.Serializer):
data = serializers.ListField(child=StatsigEventSerializer(), required=True) # type: ignore[assignment]
| StatsigItemSerializer |
python | mwaskom__seaborn | tests/_core/test_moves.py | {
"start": 9723,
"end": 10268
} | class ____(MoveFixtures):
def test_default(self, toy_df):
gb = GroupBy(["color", "group"])
res = Shift()(toy_df, gb, "x", {})
for col in toy_df:
assert_series_equal(toy_df[col], res[col])
@pytest.mark.parametrize("x,y", [(.3, 0), (0, .2), (.1, .3)])
def test_moves(self, toy_df, x, y):
gb = GroupBy(["color", "group"])
res = Shift(x=x, y=y)(toy_df, gb, "x", {})
assert_array_equal(res["x"], toy_df["x"] + x)
assert_array_equal(res["y"], toy_df["y"] + y)
| TestShift |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 19963,
"end": 23624
} | class ____:
"""
A class only kept for backwards compatibility.
Its functionality is entirely provided by module-level functions.
"""
colors = _colors_full_map
cache = _colors_full_map.cache
to_rgb = staticmethod(to_rgb)
to_rgba = staticmethod(to_rgba)
to_rgba_array = staticmethod(to_rgba_array)
colorConverter = ColorConverter()
### End of backwards-compatible color-conversion API
def _create_lookup_table(N, data, gamma=1.0):
r"""
Create an *N* -element 1D lookup table.
This assumes a mapping :math:`f : [0, 1] \rightarrow [0, 1]`. The returned
data is an array of N values :math:`y = f(x)` where x is sampled from
[0, 1].
By default (*gamma* = 1) x is equidistantly sampled from [0, 1]. The
*gamma* correction factor :math:`\gamma` distorts this equidistant
sampling by :math:`x \rightarrow x^\gamma`.
Parameters
----------
N : int
The number of elements of the created lookup table; at least 1.
data : (M, 3) array-like or callable
Defines the mapping :math:`f`.
If a (M, 3) array-like, the rows define values (x, y0, y1). The x
values must start with x=0, end with x=1, and all x values be in
increasing order.
A value between :math:`x_i` and :math:`x_{i+1}` is mapped to the range
:math:`y^1_{i-1} \ldots y^0_i` by linear interpolation.
For the simple case of a y-continuous mapping, y0 and y1 are identical.
The two values of y are to allow for discontinuous mapping functions.
E.g. a sawtooth with a period of 0.2 and an amplitude of 1 would be::
[(0, 1, 0), (0.2, 1, 0), (0.4, 1, 0), ..., [(1, 1, 0)]
In the special case of ``N == 1``, by convention the returned value
is y0 for x == 1.
If *data* is a callable, it must accept and return numpy arrays::
data(x : ndarray) -> ndarray
and map values between 0 - 1 to 0 - 1.
gamma : float
Gamma correction factor for input distribution x of the mapping.
See also https://en.wikipedia.org/wiki/Gamma_correction.
Returns
-------
array
The lookup table where ``lut[x * (N-1)]`` gives the closest value
for values of x between 0 and 1.
Notes
-----
This function is internally used for `.LinearSegmentedColormap`.
"""
if callable(data):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=float), 0, 1)
return lut
try:
adata = np.array(data)
except Exception as err:
raise TypeError("data must be convertible to an array") from err
_api.check_shape((None, 3), data=adata)
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0 and end with x=1")
if (np.diff(x) < 0).any():
raise ValueError("data mapping points must have x in increasing order")
# begin generation of lookup table
if N == 1:
# convention: use the y = f(x=1) value for a 1-element lookup table
lut = np.array(y0[-1])
else:
x = x * (N - 1)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
lut = np.concatenate([
[y1[0]],
distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1],
[y0[-1]],
])
# ensure that the lut is confined to values between 0 and 1 by clipping it
return np.clip(lut, 0.0, 1.0)
| ColorConverter |
python | aimacode__aima-python | mdp.py | {
"start": 3953,
"end": 4422
} | class ____(MDP):
"""
Inherits from MDP. Handles terminal states, and transitions to and from terminal states better.
"""
def __init__(self, init, actlist, terminals, transitions, reward=None, gamma=0.9):
MDP.__init__(self, init, actlist, terminals, transitions, reward, gamma=gamma)
def T(self, state, action):
if action is None:
return [(0.0, state)]
else:
return self.transitions[state][action]
| MDP2 |
python | huggingface__transformers | src/transformers/models/marian/modeling_marian.py | {
"start": 13988,
"end": 19069
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: MarianConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MarianAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = MarianAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| MarianDecoderLayer |
python | django__django | django/db/models/fields/__init__.py | {
"start": 72278,
"end": 73434
} | class ____(Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _("“%(value)s” value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return float(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.FloatField,
**kwargs,
}
)
| FloatField |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ScaleBar.py | {
"start": 277,
"end": 2334
} | class ____(GraphicsWidgetAnchor, GraphicsObject):
"""
Displays a rectangular bar to indicate the relative scale of objects on the view.
"""
def __init__(self, size, width=5, brush=None, pen=None, suffix='m', offset=None):
GraphicsObject.__init__(self)
GraphicsWidgetAnchor.__init__(self)
self.setFlag(self.GraphicsItemFlag.ItemHasNoContents)
self.setAcceptedMouseButtons(QtCore.Qt.MouseButton.NoButton)
if brush is None:
brush = getConfigOption('foreground')
self.brush = fn.mkBrush(brush)
self.pen = fn.mkPen(pen)
self._width = width
self.size = size
if offset is None:
offset = (0,0)
self.offset = offset
self.bar = QtWidgets.QGraphicsRectItem()
self.bar.setPen(self.pen)
self.bar.setBrush(self.brush)
self.bar.setParentItem(self)
self.text = TextItem(text=fn.siFormat(size, suffix=suffix), anchor=(0.5,1))
self.text.setParentItem(self)
def changeParent(self):
view = self.parentItem()
if view is None:
return
view.sigRangeChanged.connect(self.updateBar)
self.updateBar()
def updateBar(self):
view = self.parentItem()
if view is None:
return
p1 = view.mapFromViewToItem(self, QtCore.QPointF(0,0))
p2 = view.mapFromViewToItem(self, QtCore.QPointF(self.size,0))
w = (p2-p1).x()
self.bar.setRect(QtCore.QRectF(-w, 0, w, self._width))
self.text.setPos(-w/2., 0)
def boundingRect(self):
return QtCore.QRectF()
def setParentItem(self, p):
ret = GraphicsObject.setParentItem(self, p)
if self.offset is not None:
offset = Point(self.offset)
anchorx = 1 if offset[0] <= 0 else 0
anchory = 1 if offset[1] <= 0 else 0
anchor = (anchorx, anchory)
self.anchor(itemPos=anchor, parentPos=anchor, offset=offset)
return ret
| ScaleBar |
python | pandas-dev__pandas | pandas/tests/scalar/period/test_arithmetic.py | {
"start": 217,
"end": 13965
} | class ____:
def test_add_overflow_raises(self):
# GH#55503
per = Timestamp.max.to_period("ns")
msg = "|".join(
[
"Python int too large to convert to C long",
# windows, 32bit linux builds
"int too big to convert",
]
)
with pytest.raises(OverflowError, match=msg):
per + 1
msg = "value too large"
with pytest.raises(OverflowError, match=msg):
per + Timedelta(1)
with pytest.raises(OverflowError, match=msg):
per + offsets.Nano(1)
def test_period_add_integer(self):
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
assert per1 + 1 == per2
assert 1 + per1 == per2
def test_period_add_invalid(self):
# GH#4731
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
msg = "|".join(
[
r"unsupported operand type\(s\)",
"can only concatenate str",
"must be str, not Period",
]
)
with pytest.raises(TypeError, match=msg):
per1 + "str"
with pytest.raises(TypeError, match=msg):
"str" + per1
with pytest.raises(TypeError, match=msg):
per1 + per2
def test_period_sub_period_annual(self):
left, right = Period("2011", freq="Y"), Period("2007", freq="Y")
result = left - right
assert result == 4 * right.freq
msg = r"Input has different freq=M from Period\(freq=Y-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
left - Period("2007-01", freq="M")
def test_period_sub_period(self):
per1 = Period("2011-01-01", freq="D")
per2 = Period("2011-01-15", freq="D")
off = per1.freq
assert per1 - per2 == -14 * off
assert per2 - per1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
per1 - Period("2011-02", freq="M")
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH#23878
p1 = Period("19910905", freq=tick_classes(n))
p2 = Period("19920406", freq=tick_classes(n))
expected = Period(str(p2), freq=p2.freq.base) - Period(
str(p1), freq=p1.freq.base
)
assert (p2 - p1) == expected
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(offsets.YearEnd, "month"),
(offsets.QuarterEnd, "startingMonth"),
(offsets.MonthEnd, None),
(offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
# GH#23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
assert (p2 - p1) == expected
def test_period_add_offset(self):
# freq is DateOffset
for freq in ["Y", "2Y", "3Y"]:
per = Period("2011", freq=freq)
exp = Period("2013", freq=freq)
assert per + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + per == exp
for off in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
per + off
with pytest.raises(IncompatibleFrequency, match=msg):
off + per
for freq in ["M", "2M", "3M"]:
per = Period("2011-03", freq=freq)
exp = Period("2011-05", freq=freq)
assert per + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + per == exp
exp = Period("2012-03", freq=freq)
assert per + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + per == exp
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
for off in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
per + off
with pytest.raises(IncompatibleFrequency, match=msg):
off + per
# freq is Tick
for freq in ["D", "2D", "3D"]:
per = Period("2011-04-01", freq=freq)
exp = Period("2011-04-06", freq=freq)
assert per + offsets.Day(5) == exp
assert offsets.Day(5) + per == exp
exp = Period("2011-04-02", freq=freq)
assert per + offsets.Hour(24) == exp
assert offsets.Hour(24) + per == exp
exp = Period("2011-04-03", freq=freq)
assert per + np.timedelta64(2, "D") == exp
assert np.timedelta64(2, "D") + per == exp
exp = Period("2011-04-02", freq=freq)
assert per + np.timedelta64(3600 * 24, "s") == exp
assert np.timedelta64(3600 * 24, "s") + per == exp
exp = Period("2011-03-30", freq=freq)
assert per + timedelta(-2) == exp
assert timedelta(-2) + per == exp
exp = Period("2011-04-03", freq=freq)
assert per + timedelta(hours=48) == exp
assert timedelta(hours=48) + per == exp
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
for off in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
per + off
with pytest.raises(IncompatibleFrequency, match=msg):
off + per
for freq in ["h", "2h", "3h"]:
per = Period("2011-04-01 09:00", freq=freq)
exp = Period("2011-04-03 09:00", freq=freq)
assert per + offsets.Day(2) == exp
assert offsets.Day(2) + per == exp
exp = Period("2011-04-01 12:00", freq=freq)
assert per + offsets.Hour(3) == exp
assert offsets.Hour(3) + per == exp
msg = "cannot use operands with types"
exp = Period("2011-04-01 12:00", freq=freq)
assert per + np.timedelta64(3, "h") == exp
assert np.timedelta64(3, "h") + per == exp
exp = Period("2011-04-01 10:00", freq=freq)
assert per + np.timedelta64(3600, "s") == exp
assert np.timedelta64(3600, "s") + per == exp
exp = Period("2011-04-01 11:00", freq=freq)
assert per + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + per == exp
exp = Period("2011-04-05 12:00", freq=freq)
assert per + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + per == exp
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
for off in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
per + off
with pytest.raises(IncompatibleFrequency, match=msg):
off + per
def test_period_sub_offset(self):
    """Subtracting offsets and timedeltas from Period across several freqs.

    Compatible offsets shift the period; incompatible ones raise
    IncompatibleFrequency with one of two known messages.
    """
    # Either error text may surface depending on the mismatched input kind.
    err_pat = "|".join(
        [
            "Input has different freq",
            "Input cannot be converted to Period",
        ]
    )

    # Offsets that are incompatible with annual/monthly (DateOffset) freqs.
    bad_for_dateoffset = (
        offsets.YearBegin(2),
        offsets.MonthBegin(1),
        offsets.Minute(),
        np.timedelta64(365, "D"),
        timedelta(365),
    )

    # freq is DateOffset
    for freq in ("Y", "2Y", "3Y"):
        per = Period("2011", freq=freq)
        assert per - offsets.YearEnd(2) == Period("2009", freq=freq)
        for bad in bad_for_dateoffset:
            with pytest.raises(IncompatibleFrequency, match=err_pat):
                per - bad

    for freq in ("M", "2M", "3M"):
        per = Period("2011-03", freq=freq)
        assert per - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
        assert per - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
        for bad in bad_for_dateoffset:
            with pytest.raises(IncompatibleFrequency, match=err_pat):
                per - bad

    # freq is Tick
    bad_for_daily = (
        offsets.YearBegin(2),
        offsets.MonthBegin(1),
        offsets.Minute(),
        np.timedelta64(4, "h"),
        timedelta(hours=23),
    )
    for freq in ("D", "2D", "3D"):
        per = Period("2011-04-01", freq=freq)
        assert per - offsets.Day(5) == Period("2011-03-27", freq=freq)
        assert per - offsets.Hour(24) == Period("2011-03-31", freq=freq)
        assert per - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
        assert per - np.timedelta64(3600 * 24, "s") == Period(
            "2011-03-31", freq=freq
        )
        # Negative timedelta subtraction moves the period forward.
        assert per - timedelta(-2) == Period("2011-04-03", freq=freq)
        assert per - timedelta(hours=48) == Period("2011-03-30", freq=freq)
        for bad in bad_for_daily:
            with pytest.raises(IncompatibleFrequency, match=err_pat):
                per - bad

    bad_for_hourly = (
        offsets.YearBegin(2),
        offsets.MonthBegin(1),
        offsets.Minute(),
        np.timedelta64(3200, "s"),
        timedelta(hours=23, minutes=30),
    )
    for freq in ("h", "2h", "3h"):
        per = Period("2011-04-01 09:00", freq=freq)
        assert per - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
        assert per - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
        assert per - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
        assert per - np.timedelta64(3600, "s") == Period(
            "2011-04-01 08:00", freq=freq
        )
        assert per - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
        assert per - timedelta(days=4, minutes=180) == Period(
            "2011-03-28 06:00", freq=freq
        )
        for bad in bad_for_hourly:
            with pytest.raises(IncompatibleFrequency, match=err_pat):
                per - bad
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_period_addsub_nat(self, freq):
    """NaT combined with Period via + or - always propagates NaT."""
    # GH#13071
    per = Period("2011-01", freq=freq)

    # Subtraction: NaT is treated as another Period object.
    assert NaT - per is NaT
    assert per - NaT is NaT

    # Addition: NaT is treated as offset-like.
    assert per + NaT is NaT
    assert NaT + per is NaT
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "m"])
def test_period_add_sub_td64_nat(self, unit):
    """Period +/- a NaT timedelta64 yields NaT; td64-NaT minus Period raises."""
    # GH#47196
    per = Period("2022-06-01", "D")
    td_nat = np.timedelta64("NaT", unit)

    assert per + td_nat is NaT
    assert td_nat + per is NaT
    assert per - td_nat is NaT
    # Reflected subtraction is not defined for timedelta64 - Period.
    with pytest.raises(TypeError, match="unsupported operand"):
        td_nat - per
def test_period_ops_offset(self):
    """Day offsets shift a daily Period; sub-daily offsets raise."""
    per = Period("2011-04-01", freq="D")

    assert per + offsets.Day() == Period("2011-04-02", freq="D")
    assert per - offsets.Day(2) == Period("2011-03-30", freq="D")

    err_pat = r"Input cannot be converted to Period\(freq=D\)"
    with pytest.raises(IncompatibleFrequency, match=err_pat):
        per + offsets.Hour(2)
    with pytest.raises(IncompatibleFrequency, match=err_pat):
        per - offsets.Hour(2)
def test_period_add_timestamp_raises(self):
    """Timestamp + Period is a TypeError in either operand order."""
    # GH#17983
    ts = Timestamp("2017")
    per = Period("2017", freq="M")

    expected = r"unsupported operand type\(s\) for \+: 'Timestamp' and 'Period'"
    with pytest.raises(TypeError, match=expected):
        ts + per

    expected = r"unsupported operand type\(s\) for \+: 'Period' and 'Timestamp'"
    with pytest.raises(TypeError, match=expected):
        per + ts
| TestPeriodArithmetic |
python | jazzband__django-formtools | tests/wizard/test_forms.py | {
"start": 964,
"end": 1020
} | class ____(forms.Form):
data = forms.CharField()
| Step3 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance2.py | {
"start": 646,
"end": 880
} | class ____:
@classmethod
def test(cls: type[TD], id: int | TD):
if isinstance(id, cls):
reveal_type(id, expected_text="ClassD*")
else:
reveal_type(id, expected_text="int | ClassD*")
| ClassD |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.