language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/test_utils/tests.py | {
"start": 48840,
"end": 49731
} | class ____(SimpleTestCase):
def test_assert_raises_message(self):
msg = "'Expected message' not found in 'Unexpected message'"
# context manager form of assertRaisesMessage()
with self.assertRaisesMessage(AssertionError, msg):
with self.assertRaisesMessage(ValueError, "Expected message"):
raise ValueError("Unexpected message")
# callable form
def func():
raise ValueError("Unexpected message")
with self.assertRaisesMessage(AssertionError, msg):
self.assertRaisesMessage(ValueError, "Expected message", func)
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
with self.assertRaisesMessage(ValueError, "[.*x+]y?"):
func1()
| AssertRaisesMsgTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 2106,
"end": 2792
} | class ____(BaseModel):
name: str = Field(..., description="")
version: str = Field(..., description="")
features: Optional["AppFeaturesTelemetry"] = Field(default=None, description="")
runtime_features: Optional["FeatureFlags"] = Field(default=None, description="")
hnsw_global_config: Optional["HnswGlobalConfig"] = Field(default=None, description="")
system: Optional["RunningEnvironmentTelemetry"] = Field(default=None, description="")
jwt_rbac: Optional[bool] = Field(default=None, description="")
hide_jwt_dashboard: Optional[bool] = Field(default=None, description="")
startup: Union[datetime, date] = Field(..., description="")
| AppBuildTelemetry |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/llms/test_cache.py | {
"start": 231,
"end": 2083
} | class ____(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Look up based on `prompt` and `llm_string`."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on `prompt` and `llm_string`."""
self._cache[prompt, llm_string] = return_val
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
async def test_local_cache_generate_async() -> None:
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
assert global_cache._cache == {}
assert len(local_cache._cache) == 1
finally:
set_llm_cache(None)
def test_local_cache_generate_sync() -> None:
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
assert global_cache._cache == {}
assert len(local_cache._cache) == 1
finally:
set_llm_cache(None)
| InMemoryCache |
python | Netflix__metaflow | metaflow/graph.py | {
"start": 1378,
"end": 9429
} | class ____(object):
def __init__(
self, func_ast, decos, wrappers, config_decorators, doc, source_file, lineno
):
self.name = func_ast.name
self.source_file = source_file
# lineno is the start line of decorators in source_file
# func_ast.lineno is lines from decorators start to def of function
self.func_lineno = lineno + func_ast.lineno - 1
self.decorators = decos
self.wrappers = wrappers
self.config_decorators = config_decorators
self.doc = deindent_docstring(doc)
self.parallel_step = any(getattr(deco, "IS_PARALLEL", False) for deco in decos)
# these attributes are populated by _parse
self.tail_next_lineno = 0
self.type = None
self.out_funcs = []
self.has_tail_next = False
self.invalid_tail_next = False
self.num_args = 0
self.switch_cases = {}
self.condition = None
self.foreach_param = None
self.num_parallel = 0
self.parallel_foreach = False
self._parse(func_ast, lineno)
# these attributes are populated by _traverse_graph
self.in_funcs = set()
self.split_parents = []
self.split_branches = []
self.matching_join = None
# these attributes are populated by _postprocess
self.is_inside_foreach = False
def _expr_str(self, expr):
return "%s.%s" % (expr.value.id, expr.attr)
def _parse_switch_dict(self, dict_node):
switch_cases = {}
if isinstance(dict_node, ast.Dict):
for key, value in zip(dict_node.keys, dict_node.values):
case_key = None
# handle string literals
if hasattr(ast, "Str") and isinstance(key, ast.Str):
case_key = key.s
elif isinstance(key, ast.Constant):
case_key = key.value
elif isinstance(key, ast.Attribute):
if isinstance(key.value, ast.Attribute) and isinstance(
key.value.value, ast.Name
):
# This handles self.config.some_key
if key.value.value.id == "self":
config_var = key.value.attr
config_key = key.attr
case_key = f"config:{config_var}.{config_key}"
else:
return None
else:
return None
# handle variables or other dynamic expressions - not allowed
elif isinstance(key, ast.Name):
return None
else:
# can't statically analyze this key
return None
if case_key is None:
return None
# extract the step name from the value
if isinstance(value, ast.Attribute) and isinstance(
value.value, ast.Name
):
if value.value.id == "self":
step_name = value.attr
switch_cases[case_key] = step_name
else:
return None
else:
return None
return switch_cases if switch_cases else None
def _parse(self, func_ast, lineno):
self.num_args = len(func_ast.args.args)
tail = func_ast.body[-1]
# end doesn't need a transition
if self.name == "end":
# TYPE: end
self.type = "end"
# ensure that the tail an expression
if not isinstance(tail, ast.Expr):
return
# determine the type of self.next transition
try:
if not self._expr_str(tail.value.func) == "self.next":
return
self.has_tail_next = True
self.invalid_tail_next = True
self.tail_next_lineno = lineno + tail.lineno - 1
# Check if first argument is a dictionary (switch case)
if (
len(tail.value.args) == 1
and isinstance(tail.value.args[0], ast.Dict)
and any(k.arg == "condition" for k in tail.value.keywords)
):
# This is a switch statement
switch_cases = self._parse_switch_dict(tail.value.args[0])
condition_name = None
# Get condition parameter
for keyword in tail.value.keywords:
if keyword.arg == "condition":
if hasattr(ast, "Str") and isinstance(keyword.value, ast.Str):
condition_name = keyword.value.s
elif isinstance(keyword.value, ast.Constant) and isinstance(
keyword.value.value, str
):
condition_name = keyword.value.value
break
if switch_cases and condition_name:
self.type = "split-switch"
self.condition = condition_name
self.switch_cases = switch_cases
self.out_funcs = list(switch_cases.values())
self.invalid_tail_next = False
return
else:
self.out_funcs = [e.attr for e in tail.value.args]
keywords = dict(
(k.arg, getattr(k.value, "s", None)) for k in tail.value.keywords
)
if len(keywords) == 1:
if "foreach" in keywords:
# TYPE: foreach
self.type = "foreach"
if len(self.out_funcs) == 1:
self.foreach_param = keywords["foreach"]
self.invalid_tail_next = False
elif "num_parallel" in keywords:
self.type = "foreach"
self.parallel_foreach = True
if len(self.out_funcs) == 1:
self.num_parallel = keywords["num_parallel"]
self.invalid_tail_next = False
elif len(keywords) == 0:
if len(self.out_funcs) > 1:
# TYPE: split
self.type = "split"
self.invalid_tail_next = False
elif len(self.out_funcs) == 1:
# TYPE: linear
if self.name == "start":
self.type = "start"
elif self.num_args > 1:
self.type = "join"
else:
self.type = "linear"
self.invalid_tail_next = False
except AttributeError:
return
def __str__(self):
return """*[{0.name} {0.type} ({0.source_file} line {0.func_lineno})]*
in_funcs={in_funcs}
out_funcs={out_funcs}
split_parents={parents}
split_branches={branches}
matching_join={matching_join}
is_inside_foreach={is_inside_foreach}
decorators={decos}
num_args={0.num_args}
has_tail_next={0.has_tail_next} (line {0.tail_next_lineno})
invalid_tail_next={0.invalid_tail_next}
foreach_param={0.foreach_param}
condition={0.condition}
parallel_step={0.parallel_step}
parallel_foreach={0.parallel_foreach}
-> {out}""".format(
self,
matching_join=self.matching_join and "[%s]" % self.matching_join,
is_inside_foreach=self.is_inside_foreach,
out_funcs=", ".join("[%s]" % x for x in self.out_funcs),
in_funcs=", ".join("[%s]" % x for x in self.in_funcs),
parents=", ".join("[%s]" % x for x in self.split_parents),
branches=", ".join("[%s]" % x for x in self.split_branches),
decos=" | ".join(map(str, self.decorators)),
out=", ".join("[%s]" % x for x in self.out_funcs),
)
| DAGNode |
python | django__django | tests/template_tests/filter_tests/test_stringformat.py | {
"start": 1113,
"end": 1814
} | class ____(SimpleTestCase):
def test_format(self):
self.assertEqual(stringformat(1, "03d"), "001")
self.assertEqual(stringformat([1, None], "s"), "[1, None]")
self.assertEqual(stringformat((1, 2, 3), "s"), "(1, 2, 3)")
self.assertEqual(stringformat((1,), "s"), "(1,)")
self.assertEqual(stringformat({1, 2}, "s"), "{1, 2}")
self.assertEqual(stringformat({1: 2, 2: 3}, "s"), "{1: 2, 2: 3}")
def test_invalid(self):
self.assertEqual(stringformat(1, "z"), "")
self.assertEqual(stringformat(object(), "d"), "")
self.assertEqual(stringformat(None, "d"), "")
self.assertEqual(stringformat((1, 2, 3), "d"), "")
| FunctionTests |
python | django-extensions__django-extensions | tests/management/commands/test_unreferenced_files.py | {
"start": 615,
"end": 1871
} | class ____(TestCase):
def setUp(self):
self.media_root_dir = mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.media_root_dir)
@patch("sys.stdout", new_callable=StringIO)
def test_should_not_print_any_output(self, m_stdout):
with override_settings(MEDIA_ROOT=self.media_root_dir):
call_command("unreferenced_files")
self.assertIs("", m_stdout.getvalue())
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_unreferenced_hello_txt_file(self, m_stdout):
fn = os.path.join(self.media_root_dir, "hello.txt")
open(fn, "a").close()
with override_settings(MEDIA_ROOT=self.media_root_dir):
call_command("unreferenced_files")
self.assertIn(fn, m_stdout.getvalue())
@patch("sys.stdout", new_callable=StringIO)
def test_should_not_print_referenced_image_jpg_file(self, m_stdout):
fn = os.path.join(self.media_root_dir, "image.jpg")
open(fn, "a").close()
Photo.objects.create(photo="image.jpg")
with override_settings(MEDIA_ROOT=self.media_root_dir):
call_command("unreferenced_files")
self.assertNotIn(fn, m_stdout.getvalue())
| UnreferencedFilesTests |
python | walkccc__LeetCode | solutions/2957. Remove Adjacent Almost-Equal Characters/2957.py | {
"start": 0,
"end": 248
} | class ____:
def removeAlmostEqualCharacters(self, word: str) -> int:
ans = 0
i = 1
while i < len(word):
if abs(ord(word[i]) - ord(word[i - 1])) <= 1:
ans += 1
i += 2
else:
i += 1
return ans
| Solution |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_transaction_threshold_override.py | {
"start": 406,
"end": 7770
} | class ____(APITestCase):
feature_name = "organizations:performance-view"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.org, members=[self.user])
self.project = self.create_project(organization=self.org, teams=[self.team])
self.url = reverse(
"sentry-api-0-organization-project-transaction-threshold-override",
args=[self.org.slug],
)
self.data = load_data(
"transaction",
timestamp=before_now(minutes=1),
start_timestamp=before_now(minutes=1, milliseconds=500),
)
self.data["transaction"] = "earth"
self.store_event(self.data, project_id=self.project.id)
def test_get_for_project_with_custom_threshold(self) -> None:
ProjectTransactionThresholdOverride.objects.create(
transaction="earth",
project=self.project,
organization=self.project.organization,
threshold=400,
metric=TransactionMetric.LCP.value,
)
with self.feature(self.feature_name):
response = self.client.get(
self.url,
data={
"project": [self.project.id],
"transaction": self.data["transaction"],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["threshold"] == "400"
assert response.data["metric"] == "lcp"
def test_get_for_project_without_custom_threshold(self) -> None:
with self.feature(self.feature_name):
response = self.client.get(
self.url,
data={
"project": [self.project.id],
"transaction": self.data["transaction"],
},
format="json",
)
assert response.status_code == 404
def test_get_returns_error_without_feature_enabled(self) -> None:
with self.feature({self.feature_name: False, "organizations:discover-basic": False}):
ProjectTransactionThresholdOverride.objects.create(
project=self.project,
organization=self.project.organization,
threshold=300,
metric=TransactionMetric.DURATION.value,
transaction=self.data["transaction"],
)
response = self.client.get(
self.url,
data={
"project": [self.project.id],
"transaction": self.data["transaction"],
},
format="json",
)
assert response.status_code == 404
def test_create_project_threshold(self) -> None:
assert not ProjectTransactionThresholdOverride.objects.filter(
transaction=self.data["transaction"],
project=self.project,
organization=self.org,
).exists()
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"transaction": self.data["transaction"],
"project": [self.project.id],
"metric": "duration",
"threshold": "600",
},
)
assert response.status_code == 201, response.content
assert response.data["threshold"] == "600"
assert response.data["metric"] == "duration"
assert response.data["editedBy"] == str(self.user.id)
assert ProjectTransactionThresholdOverride.objects.filter(
transaction=self.data["transaction"],
project=self.project,
organization=self.org,
).exists()
def test_creating_too_many_project_thresholds_raises_error(self) -> None:
ProjectTransactionThresholdOverride.objects.create(
project=self.project,
organization=self.project.organization,
threshold=300,
metric=TransactionMetric.DURATION.value,
transaction="fire",
)
MAX_TRANSACTION_THRESHOLDS_PER_PROJECT = 1
with mock.patch(
"sentry.api.endpoints.project_transaction_threshold_override.MAX_TRANSACTION_THRESHOLDS_PER_PROJECT",
MAX_TRANSACTION_THRESHOLDS_PER_PROJECT,
):
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"transaction": self.data["transaction"],
"project": [self.project.id],
"metric": "duration",
"threshold": "600",
},
)
assert response.status_code == 400
assert response.data == {
"non_field_errors": ["At most 1 configured transaction thresholds per project."]
}
def test_update_project_threshold(self) -> None:
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"transaction": self.data["transaction"],
"project": [self.project.id],
"metric": "duration",
"threshold": "300",
},
)
assert response.status_code == 201, response.content
assert response.data["threshold"] == "300"
assert response.data["metric"] == "duration"
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"transaction": self.data["transaction"],
"project": [self.project.id],
"metric": "lcp",
"threshold": "600",
},
)
assert response.status_code == 200, response.content
assert response.data["threshold"] == "600"
assert response.data["metric"] == "lcp"
def test_clear_project_threshold(self) -> None:
ProjectTransactionThresholdOverride.objects.create(
project=self.project,
transaction=self.data["transaction"],
organization=self.project.organization,
threshold=900,
metric=TransactionMetric.LCP.value,
)
assert ProjectTransactionThresholdOverride.objects.filter(
transaction=self.data["transaction"],
project=self.project,
organization=self.project.organization,
).exists()
with self.feature(self.feature_name):
response = self.client.delete(
self.url,
data={
"project": [self.project.id],
"transaction": self.data["transaction"],
},
)
assert response.status_code == 204
assert not ProjectTransactionThresholdOverride.objects.filter(
transaction=self.data["transaction"],
project=self.project,
organization=self.project.organization,
).exists()
| ProjectTransactionThresholdOverrideTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataplex.py | {
"start": 3279,
"end": 3541
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataplex Catalog EntryType link."""
name = "Dataplex Catalog EntryType"
key = "dataplex_catalog_entry_type_key"
format_str = DATAPLEX_CATALOG_ENTRY_TYPE_LINK
| DataplexCatalogEntryTypeLink |
python | tensorflow__tensorflow | tensorflow/core/function/capture/free_vars_detect_test.py | {
"start": 1406,
"end": 14154
} | class ____(parameterized.TestCase):
def test_func_arg(self):
x = 1 # pylint: disable=unused-variable
def f(x):
return x + 1
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertEmpty(func_map)
def test_func_local_var(self):
def f():
x = 1
return x + 1
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertEmpty(func_map)
def test_global_var_int(self):
x = 1
def f():
return x + 1
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["x"])
def test_builtin_func(self):
def f(x):
return len(x)
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertEmpty(func_map)
def test_global_var_dict(self):
glob = {"a": 1}
def f():
return glob["a"] + 1
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_global_var_dict_w_var_index(self):
glob = {"a": 1}
key = "a"
def f():
return glob[key] + 1
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["glob", "key"])
def test_duplicate_global_var(self):
x = 1
def f():
return x + x
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["x"])
@parameterized.named_parameters(
("lambda_1", lambda _x: 3,), ("lambda_2", lambda _x: 3,))
def test_multiple_lambda_w_same_line_num_and_args(self, fn):
func_map = free_vars_detect._detect_function_free_vars(fn)
self.assertEmpty(func_map)
def test_lambda_wo_free_var(self):
f = lambda x: x + x
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertEmpty(func_map)
def test_lambda_w_free_var(self):
glob = 1
f = lambda x: x + glob
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_multi_lambda_w_free_var(self):
glob = 1
g = lambda x: x + glob
h = lambda: glob + 1
def f(x):
return g(x) + h()
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertLen(func_map, 3)
self.assertIn("f", func_map.keys())
self.assertIn("g", func_map.keys())
self.assertIn("h", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["g", "h"])
free_vars = get_var_name(func_map["g"])
self.assertSequenceEqual(free_vars, ["glob"])
free_vars = get_var_name(func_map["h"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_lambda_inline(self):
glob = 1
def f(x):
return lambda: x + glob
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_glob_numpy_var(self):
a = 0
b = np.asarray(1)
def f():
c = np.asarray(2)
res = a + b + c
return res
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["a", "b"])
def test_global_var_in_nested_func(self):
x = 1
def f():
def g():
return x + 1
return g()
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
self.assertLen(func_map.keys(), 1)
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["x"])
def test_global_var_from_outer_func(self):
x = 1
def g():
return x + 1
def f():
return g()
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
self.assertIn("g", func_map.keys())
self.assertLen(func_map.keys(), 2)
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["g"])
free_vars = get_var_name(func_map["g"])
self.assertSequenceEqual(free_vars, ["x"])
def test_method(self):
x = 1
class Foo():
def f(self):
return x
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.f)
self.assertLen(func_map.keys(), 1)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["x"])
def test_method_w_method_call(self):
x = 0
class Foo():
def f(self):
return self.g
def g(self):
return [x]
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.f)
self.assertLen(func_map.keys(), 2)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["self.g"])
self.assertIn("Foo.g", func_map.keys())
free_vars = get_var_name(func_map["Foo.g"])
self.assertSequenceEqual(free_vars, ["x"])
def test_method_w_self_as_arg(self):
x = 1
class Foo():
def f(self):
return self.g(self)
def g(self, obj):
if obj != self:
return x
else:
return -x
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.f)
self.assertLen(func_map.keys(), 2)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["self.g"])
self.assertIn("Foo.g", func_map.keys())
free_vars = get_var_name(func_map["Foo.g"])
self.assertSequenceEqual(free_vars, ["x"])
def test_self_inside_method(self):
x = 1
class Foo():
def __init__(self):
self.val = 2
def bar(self):
def tf_func():
return self.val + x
return tf_func
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.bar())
self.assertLen(func_map.keys(), 1)
self.assertIn("tf_func", func_map.keys())
free_vars = get_var_name(func_map["tf_func"])
self.assertSequenceEqual(free_vars, ["self", "self.val", "x"])
def test_self_inside_function_w_multiple_closures(self):
# Test when a function contins multiple closures
class Foo():
def method(self):
class Baz():
def baz_str(self):
return "Baz"
baz = Baz()
x = "x"
class Bar():
def bar_str(self):
return x + "Bar"
def method(self):
def fn():
return self.bar_str() + baz.baz_str()
return fn
bar = Bar()
return bar.method()
foo = Foo()
fn = foo.method()
# cells for `self.bar_str()`, `baz.baz_str()`
self.assertLen(fn.__closure__, 2)
func_map = free_vars_detect._detect_function_free_vars(fn)
self.assertLen(func_map.keys(), 2)
self.assertIn("fn", func_map.keys())
free_vars = get_var_name(func_map["fn"])
self.assertSequenceEqual(free_vars, ["baz", "self", "self.bar_str"])
self.assertIn("Bar.bar_str", func_map.keys())
free_vars = get_var_name(func_map["Bar.bar_str"])
self.assertSequenceEqual(free_vars, ["x"])
def test_method_w_self_attribute(self):
x = 0
class Foo():
def __init__(self):
self.x = 1
self.y = 2
def f(self):
return self.g + self.x + self.y
def g(self):
return x
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.f)
self.assertLen(func_map.keys(), 2)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["self.g", "self.x", "self.y"])
self.assertIn("Foo.g", func_map.keys())
free_vars = get_var_name(func_map["Foo.g"])
self.assertSequenceEqual(free_vars, ["x"])
def test_method_w_multiple_attributes(self):
glob = "dummy_value"
class Foo():
def f(self):
return self.g.h.x.y.z
def g(self):
return glob
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.f)
self.assertLen(func_map.keys(), 2)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["self.g"])
self.assertIn("Foo.g", func_map.keys())
free_vars = get_var_name(func_map["Foo.g"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_classmethod_decorator(self):
glob = 1
class Foo():
@classmethod
def f(cls):
return glob
func_map = free_vars_detect._detect_function_free_vars(Foo.f)
self.assertLen(func_map.keys(), 1)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_method_call_classmethod(self):
glob = 1
class Foo():
def f(self):
return self.g()
@classmethod
def g(cls):
return glob
foo = Foo()
func_map = free_vars_detect._detect_function_free_vars(foo.f)
self.assertLen(func_map.keys(), 2)
self.assertIn("Foo.f", func_map.keys())
free_vars = get_var_name(func_map["Foo.f"])
self.assertSequenceEqual(free_vars, ["self.g"])
self.assertIn("Foo.g", func_map.keys())
free_vars = get_var_name(func_map["Foo.g"])
self.assertSequenceEqual(free_vars, ["glob"])
def test_global_var_from_renamed_outer_func(self):
x = 1
def g():
return x + 1
def f():
h = g
return h()
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
self.assertIn("g", func_map.keys())
self.assertLen(func_map.keys(), 2)
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["g"])
free_vars = get_var_name(func_map["g"])
self.assertSequenceEqual(free_vars, ["x"])
def test_decorated_method_w_self_no_exception(self):
"""Test this pattern does not raise any exceptions."""
def dummy_tf_function(func):
func_map = free_vars_detect._detect_function_free_vars(func)
self.assertLen(func_map, 1)
self.assertIn("foo", func_map.keys())
free_vars = get_var_name(func_map["foo"])
self.assertSequenceEqual(free_vars, ["dummy_tf_function"])
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
glob = 1
# This pattern is not fully supported yet in the sense that `self.bar()` is
# not inspected so `glob` cannot be detected.
# The reason is the neither `self` nor `self.bar` is accessible from the
# perspective of dummy_tf_function decorator.
# One possible solution is parsing the source code of the whole module,
# instead of single function. And probably get the source of `self.bar`
# from the AST of the module where `Foo` is defined. One potentail challenge
# of this approach is how to locate the decorated function in the AST.
class Foo():
@dummy_tf_function
def foo(self):
return self.bar()
def bar(self):
return glob
_ = Foo()
# Use `wrapper_first` to control different arguments order
@parameterized.parameters(
(functools.update_wrapper, True),
(tf_decorator.make_decorator, False))
def test_func_w_decorator(self, make_decorator, wrapper_first):
x = 1
def decorator_foo(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
if wrapper_first:
return make_decorator(wrapper, func)
else:
return make_decorator(func, wrapper)
@decorator_foo
@decorator_foo
def f():
@decorator_foo
@decorator_foo
def g():
return x + 1
return g()
func_map = free_vars_detect._detect_function_free_vars(f)
self.assertIn("f", func_map.keys())
self.assertLen(func_map.keys(), 2)
free_vars = get_var_name(func_map["f"])
self.assertSequenceEqual(free_vars, ["decorator_foo", "x"])
# TODO(panzf): test the pattern when callable function args are supported
@unittest.skip("Feature not implemented")
def test_global_var_from_arg_func(self):
x = 1
def g():
return x + 1
def f(h):
return h()
_ = f(g)
| FreeVarDetectionTest |
python | python__mypy | mypy/indirection.py | {
"start": 138,
"end": 6023
} | class ____(TypeVisitor[None]):
"""Returns all module references within a particular type."""
def __init__(self) -> None:
# Module references are collected here
self.modules: set[str] = set()
# User to avoid infinite recursion with recursive types
self.seen_types: set[types.TypeAliasType | types.Instance] = set()
def find_modules(self, typs: Iterable[types.Type]) -> set[str]:
self.modules = set()
self.seen_types = set()
for typ in typs:
self._visit(typ)
return self.modules
def _visit(self, typ: types.Type) -> None:
# Note: instances are needed for `class str(Sequence[str]): ...`
if (
isinstance(typ, types.TypeAliasType)
or isinstance(typ, types.ProperType)
and isinstance(typ, types.Instance)
):
# Avoid infinite recursion for recursive types.
if typ in self.seen_types:
return
self.seen_types.add(typ)
typ.accept(self)
def _visit_type_tuple(self, typs: tuple[types.Type, ...]) -> None:
# Micro-optimization: Specialized version of _visit for lists
for typ in typs:
if (
isinstance(typ, types.TypeAliasType)
or isinstance(typ, types.ProperType)
and isinstance(typ, types.Instance)
):
# Avoid infinite recursion for recursive types.
if typ in self.seen_types:
continue
self.seen_types.add(typ)
typ.accept(self)
def _visit_type_list(self, typs: list[types.Type]) -> None:
# Micro-optimization: Specialized version of _visit for tuples
for typ in typs:
if (
isinstance(typ, types.TypeAliasType)
or isinstance(typ, types.ProperType)
and isinstance(typ, types.Instance)
):
# Avoid infinite recursion for recursive types.
if typ in self.seen_types:
continue
self.seen_types.add(typ)
typ.accept(self)
def visit_unbound_type(self, t: types.UnboundType) -> None:
self._visit_type_tuple(t.args)
def visit_any(self, t: types.AnyType) -> None:
pass
def visit_none_type(self, t: types.NoneType) -> None:
pass
def visit_uninhabited_type(self, t: types.UninhabitedType) -> None:
pass
def visit_erased_type(self, t: types.ErasedType) -> None:
pass
def visit_deleted_type(self, t: types.DeletedType) -> None:
pass
def visit_type_var(self, t: types.TypeVarType) -> None:
self._visit_type_list(t.values)
self._visit(t.upper_bound)
self._visit(t.default)
def visit_param_spec(self, t: types.ParamSpecType) -> None:
self._visit(t.upper_bound)
self._visit(t.default)
self._visit(t.prefix)
def visit_type_var_tuple(self, t: types.TypeVarTupleType) -> None:
self._visit(t.upper_bound)
self._visit(t.default)
def visit_unpack_type(self, t: types.UnpackType) -> None:
t.type.accept(self)
def visit_parameters(self, t: types.Parameters) -> None:
self._visit_type_list(t.arg_types)
def visit_instance(self, t: types.Instance) -> None:
# Instance is named, record its definition and continue digging into
# components that constitute semantic meaning of this type: bases, metaclass,
# tuple type, and typeddict type.
# Note: we cannot simply record the MRO, in case an intermediate base contains
# a reference to type alias, this affects meaning of map_instance_to_supertype(),
# see e.g. testDoubleReexportGenericUpdated.
self._visit_type_tuple(t.args)
if t.type:
# Important optimization: instead of simply recording the definition and
# recursing into bases, record the MRO and only traverse generic bases.
for s in t.type.mro:
self.modules.add(s.module_name)
for base in s.bases:
if base.args:
self._visit_type_tuple(base.args)
if t.type.metaclass_type:
self._visit(t.type.metaclass_type)
if t.type.typeddict_type:
self._visit(t.type.typeddict_type)
if t.type.tuple_type:
self._visit(t.type.tuple_type)
def visit_callable_type(self, t: types.CallableType) -> None:
self._visit_type_list(t.arg_types)
self._visit(t.ret_type)
self._visit_type_tuple(t.variables)
def visit_overloaded(self, t: types.Overloaded) -> None:
for item in t.items:
self._visit(item)
self._visit(t.fallback)
def visit_tuple_type(self, t: types.TupleType) -> None:
self._visit_type_list(t.items)
self._visit(t.partial_fallback)
def visit_typeddict_type(self, t: types.TypedDictType) -> None:
    # Item value types and the fallback carry the module references.
    self._visit_type_list(list(t.items.values()))
    self._visit(t.fallback)
def visit_literal_type(self, t: types.LiteralType) -> None:
    # Only the fallback instance can carry module references.
    self._visit(t.fallback)
def visit_union_type(self, t: types.UnionType) -> None:
    # Visit every member of the union.
    self._visit_type_list(t.items)
def visit_partial_type(self, t: types.PartialType) -> None:
    # Partial types are transient during inference; nothing to record.
    pass
def visit_type_type(self, t: types.TypeType) -> None:
    # Visit the wrapped item type.
    self._visit(t.item)
def visit_type_alias_type(self, t: types.TypeAliasType) -> None:
    # Type alias is named, record its definition and continue digging into
    # components that constitute semantic meaning of this type: target and args.
    if t.alias:
        self.modules.add(t.alias.module)
        self._visit(t.alias.target)
    self._visit_type_list(t.args)
| TypeIndirectionVisitor |
python | ansible__ansible | test/integration/targets/cache-plugins/cache_plugins/dummy_file_cache.py | {
"start": 1055,
"end": 1390
} | class ____(BaseFileCacheModule):
_persistent = False
def _load(self, filepath: str) -> object:
    """Load a cached value by literally evaluating the file's contents.

    NOTE(review): eval() on cache-file contents is only acceptable because
    this is a test-only dummy plugin; never use it on untrusted data.
    """
    with open(filepath, 'r') as jfile:
        # Bug fix: read from the open file handle, not the path string
        # (a str has no .read(), so the original raised AttributeError).
        return eval(jfile.read())
def _dump(self, value: object, filepath: str) -> None:
    """Persist *value* to *filepath* as its str() representation."""
    rendered = str(value)
    with open(filepath, 'w') as cache_file:
        cache_file.write(rendered)
| CacheModule |
python | mozilla__bleach | bleach/_vendor/parse.py | {
"start": 28146,
"end": 39015
} | class ____(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
String values are percent-encoded byte values, unless the key < 128, and
in the "safe" set (either the specified safe set, or default set).
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
    """safe: bytes object."""
    # The effective safe set is the always-safe ASCII characters plus any
    # caller-supplied additions; bytes in this set are emitted verbatim.
    self.safe = _ALWAYS_SAFE.union(safe)
def __repr__(self):
    # Render as "<ClassName {...}>"; without this override the instance
    # would display with defaultdict's default repr.
    contents = dict(self)
    return "<%s %r>" % (type(self).__name__, contents)
def __missing__(self, b):
    # Handle a cache miss. Store quoted string in cache and return.
    # Safe bytes map to themselves; everything else becomes an uppercase
    # percent-escape ("%XX"). Subsequent lookups hit the dict directly.
    res = chr(b) if b in self.safe else '%{:02X}'.format(b)
    self[b] = res
    return res
def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted.

    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters.

    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                  "$" | ","

    Each of these characters is reserved in some component of a URL,
    but not necessarily in all of them.

    By default, the quote function is intended for quoting the path
    section of a URL.  Thus, it will not encode '/'.  This character
    is reserved, but in typical usage the quote function is being
    called on a path where the existing slash characters are used as
    reserved characters.

    string and safe may be either str or bytes objects. encoding and errors
    must not be specified if string is a bytes object.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        # Apply the documented defaults only for str input.
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    else:
        # bytes input is already encoded; encoding/errors are meaningless.
        if encoding is not None:
            raise TypeError("quote() doesn't support 'encoding' for bytes")
        if errors is not None:
            raise TypeError("quote() doesn't support 'errors' for bytes")
    # All quoting happens on the bytes level.
    return quote_from_bytes(string, safe)
def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values. Plus signs in the original string are escaped unless
    they are included in safe. It also does not have safe default to '/'.
    """
    # Fast path: when no space is present, plain quote() already produces
    # the right answer for both str and bytes input.
    if isinstance(string, str):
        no_space = ' ' not in string
    else:
        no_space = b' ' not in string
    if no_space:
        return quote(string, safe, encoding, errors)
    # Mark the space as "safe" so quote() leaves it untouched, then swap
    # every space for '+'. The space literal must match the type of `safe`.
    space = ' ' if isinstance(safe, str) else b' '
    quoted = quote(string, safe + space, encoding, errors)
    return quoted.replace(' ', '+')
def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes([c for c in safe if c < 128])
    # Fast path: if stripping every safe byte empties the input, nothing
    # needs quoting and the bytes decode directly to ASCII.
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    # One Quoter (per-byte quoting cache) is built per distinct safe set
    # and memoized in the module-level _safe_quoters map.
    try:
        quoter = _safe_quoters[safe]
    except KeyError:
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return ''.join([quoter(char) for char in bs])
def urlencode(query, doseq=False, safe='', encoding=None, errors=None,
              quote_via=quote_plus):
    """Encode a dict or sequence of two-element tuples into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The components of a query arg may each be either a string or a bytes type.

    The safe, encoding, and errors parameters are passed down to the function
    specified by quote_via (encoding and errors only if a component is a str).
    """
    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty, va, tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence "
                            "or mapping object").with_traceback(tb)
    l = []
    if not doseq:
        # Simple mode: every value is quoted as a single scalar.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)
            if isinstance(v, bytes):
                v = quote_via(v, safe)
            else:
                v = quote_via(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        # Sequence mode: bytes and str values are still scalars, but any
        # other sequence value contributes one k=elt pair per element.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)
            if isinstance(v, bytes):
                v = quote_via(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_via(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_via(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_via(elt, safe)
                        else:
                            elt = quote_via(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if not isinstance(url, str):
        # Non-str input (e.g. bytes) passes through unchanged.
        return url
    try:
        return url.encode("ASCII").decode()
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = str(url).strip()
    # Strip one layer of angle brackets, if present.
    if url.startswith('<') and url.endswith('>'):
        url = url[1:-1].strip()
    # Strip a leading "URL:" marker, if present.
    if url.startswith('URL:'):
        url = url[4:].strip()
    return url
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    # Compile the pattern lazily on first use and cache it in a module
    # global, matching the original urllib implementation.
    global _typeprog
    if _typeprog is None:
        _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL)
    m = _typeprog.match(url)
    if m is None:
        return None, url
    scheme, rest = m.groups()
    return scheme.lower(), rest
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    # Lazily compiled, module-cached pattern (as in the original urllib).
    global _hostprog
    if _hostprog is None:
        _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL)
    m = _hostprog.match(url)
    if m is None:
        return None, url
    host_port, path = m.groups()
    # When a host is present, guarantee the returned path is absolute.
    if path and not path.startswith('/'):
        path = '/' + path
    return host_port, path
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    # Split on the LAST '@' so userinfo may itself contain '@'.
    userinfo, sep, hostport = host.rpartition('@')
    if not sep:
        return None, hostport
    return userinfo, hostport
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    # Split on the FIRST ':'; a missing colon yields passwd=None.
    name, sep, passwd = user.partition(':')
    return name, (passwd if sep else None)
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    # Lazily compiled, module-cached pattern (as in the original urllib).
    global _portprog
    if _portprog is None:
        _portprog = re.compile('(.*):([0-9]*)$', re.DOTALL)
    m = _portprog.match(host)
    if m:
        # NOTE: on a match with an empty port ("host:"), the bare host is
        # returned without the trailing colon, mirroring urllib behavior.
        host, port = m.groups()
        if port:
            return host, port
    return host, None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.

    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number."""
    head, sep, tail = host.rpartition(':')
    if not sep:
        # No colon at all: rpartition put the whole string in `tail`.
        return tail, defport
    if tail:
        try:
            return head, int(tail)
        except ValueError:
            return head, None
    # Colon present but empty port: keep the default.
    return head, defport
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    # Split on the LAST '?'; no '?' yields query=None.
    head, sep, query = url.rpartition('?')
    return (head, query) if sep else (url, None)
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    # Split on the LAST '#'; no '#' yields tag=None.
    head, sep, tag = url.rpartition('#')
    return (head, tag) if sep else (url, None)
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    # The path is everything before the first ';'; the rest are attributes.
    path, *attrs = url.split(';')
    return path, attrs
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    # Split on the FIRST '='; a missing '=' yields value=None.
    name, sep, value = attr.partition('=')
    return name, (value if sep else None)
python | PrefectHQ__prefect | tests/server/models/test_orm.py | {
"start": 14029,
"end": 21436
} | class ____:
async def test_flow_run_estimated_run_time_matches_total_run_time(
self, session, flow, db
):
dt = now("UTC") - datetime.timedelta(minutes=1)
fr = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, state=schemas.states.Pending(timestamp=dt)
),
)
# move into a running state for 3 seconds, then complete
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=fr.id,
state=schemas.states.Running(timestamp=dt + datetime.timedelta(seconds=1)),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=fr.id,
state=schemas.states.Completed(
timestamp=dt + datetime.timedelta(seconds=4)
),
)
assert fr.total_run_time == datetime.timedelta(seconds=3)
assert fr.estimated_run_time == datetime.timedelta(seconds=3)
# check SQL logic
await session.commit()
result = await session.execute(
sa.select(db.FlowRun.estimated_run_time).filter_by(id=fr.id)
)
assert result.scalar() == datetime.timedelta(seconds=3)
async def test_flow_run_estimated_run_time_includes_current_run(
self, session, flow, db
):
tolerance = datetime.timedelta(seconds=3)
dt = now("UTC") - datetime.timedelta(minutes=1)
fr = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, state=schemas.states.Pending(timestamp=dt)
),
)
# move into a running state
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=fr.id,
state=schemas.states.Running(timestamp=dt + datetime.timedelta(seconds=1)),
)
assert fr.total_run_time == datetime.timedelta(0)
# the estimated time is between ~59 and ~60 seconds
assert (
datetime.timedelta(seconds=59) - tolerance
< fr.estimated_run_time
< datetime.timedelta(seconds=60) + tolerance
)
# check SQL logic
await session.commit()
result = await session.execute(
sa.select(db.FlowRun.estimated_run_time).filter_by(id=fr.id)
)
assert (
datetime.timedelta(seconds=59) - tolerance
< result.scalar()
< datetime.timedelta(seconds=60) + tolerance
)
async def test_task_run_estimated_run_time_matches_total_run_time(
self, session, flow_run, db
):
dt = now("UTC") - datetime.timedelta(minutes=1)
tr = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=flow_run.id,
task_key="a",
dynamic_key="0",
state=schemas.states.Pending(timestamp=dt),
),
)
# move into a running state for 3 seconds, then complete
await models.task_runs.set_task_run_state(
session=session,
task_run_id=tr.id,
state=schemas.states.Running(timestamp=dt + datetime.timedelta(seconds=1)),
)
await models.task_runs.set_task_run_state(
session=session,
task_run_id=tr.id,
state=schemas.states.Completed(
timestamp=dt + datetime.timedelta(seconds=4)
),
)
assert tr.total_run_time == datetime.timedelta(seconds=3)
assert tr.estimated_run_time == datetime.timedelta(seconds=3)
# check SQL logic
await session.commit()
result = await session.execute(
sa.select(db.TaskRun.estimated_run_time).filter_by(id=tr.id)
)
assert result.scalar() == datetime.timedelta(seconds=3)
async def test_task_run_estimated_run_time_includes_current_run(
self, session, flow_run, db
):
tolerance = datetime.timedelta(seconds=3)
dt = now("UTC") - datetime.timedelta(minutes=1)
tr = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=flow_run.id,
task_key="a",
dynamic_key="0",
state=schemas.states.Pending(timestamp=dt),
),
)
# move into a running state
await models.task_runs.set_task_run_state(
session=session,
task_run_id=tr.id,
state=schemas.states.Running(timestamp=dt + datetime.timedelta(seconds=1)),
)
assert tr.total_run_time == datetime.timedelta(0)
# the estimated time is between ~59 and ~60 seconds
assert (
datetime.timedelta(seconds=59) - tolerance
< tr.estimated_run_time
< datetime.timedelta(seconds=60) + tolerance
)
# check SQL logic
await session.commit()
result = await session.execute(
sa.select(db.TaskRun.estimated_run_time).filter_by(id=tr.id)
)
assert (
datetime.timedelta(seconds=59) - tolerance
< result.scalar()
< datetime.timedelta(seconds=60) + tolerance
)
async def test_estimated_run_time_in_correlated_subquery(self, session, flow, db):
"""
The estimated_run_time includes a .correlate() statement that ensures it can
be used as a correlated subquery within other selects or joins.
"""
dt = now("UTC") - datetime.timedelta(minutes=1)
fr = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, state=schemas.states.Pending(timestamp=dt)
),
)
# move into a running state for 3 seconds, then complete
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=fr.id,
state=schemas.states.Running(timestamp=dt + datetime.timedelta(seconds=1)),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=fr.id,
state=schemas.states.Completed(
timestamp=dt + datetime.timedelta(seconds=4)
),
)
await session.commit()
query = (
sa.select(
db.FlowRun.id,
db.FlowRun.estimated_run_time,
db.FlowRunState.type,
)
.select_from(db.FlowRun)
.join(
db.FlowRunState,
db.FlowRunState.id == db.FlowRun.state_id,
)
.where(db.FlowRun.id == fr.id)
)
# this query has only one FROM clause due to correlation
assert str(query).count("FROM") == 1
# this query has only one JOIN clause due to correlation
assert str(query).count("JOIN") == 1
result = await session.execute(query)
assert result.all() == [
(
fr.id,
datetime.timedelta(seconds=3),
schemas.states.StateType.COMPLETED,
)
]
| TestTotalRunTimeEstimate |
python | numba__llvmlite | llvmlite/ir/values.py | {
"start": 16223,
"end": 16518
} | class ____(object):
"""
A named metadata node.
Do not instantiate directly, use Module.add_named_metadata() instead.
"""
def __init__(self, parent):
    # parent: the module that owns this named metadata node.
    self.parent = parent
    # Metadata operands, appended via add().
    self.operands = []
def add(self, md):
    # Append a metadata node to this named metadata's operand list.
    self.operands.append(md)
| NamedMetaData |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 143255,
"end": 162882
} | class ____(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
| RFC3542AncillaryTest |
python | django__django | tests/test_utils/tests.py | {
"start": 48383,
"end": 48840
} | class ____(TestCase):
fixtures = ["person.json"]
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
# Detect fixture loading by counting SQL queries, should be zero
with self.assertNumQueries(0):
super().__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
| SkippingExtraTests |
python | ray-project__ray | python/ray/data/_internal/execution/operators/hash_shuffle.py | {
"start": 46497,
"end": 49381
} | class ____(HashShufflingOperatorBase):
def __init__(
self,
input_op: PhysicalOperator,
data_context: DataContext,
*,
key_columns: Tuple[str],
num_partitions: Optional[int] = None,
should_sort: bool = False,
aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
):
super().__init__(
name_factory=(
lambda num_partitions: f"Shuffle(key_columns={key_columns}, num_partitions={num_partitions})"
),
input_ops=[input_op],
data_context=data_context,
key_columns=[key_columns],
num_partitions=num_partitions,
aggregator_ray_remote_args_override=aggregator_ray_remote_args_override,
partition_aggregation_factory=(
lambda aggregator_id, target_partition_ids: Concat(
aggregator_id,
target_partition_ids,
should_sort=should_sort,
key_columns=key_columns,
)
),
shuffle_progress_bar_name="Shuffle",
)
def _get_operator_num_cpus_override(self) -> float:
return self.data_context.hash_shuffle_operator_actor_num_cpus_override
@classmethod
def _estimate_aggregator_memory_allocation(
cls,
*,
num_aggregators: int,
num_partitions: int,
estimated_dataset_bytes: int,
) -> int:
partition_byte_size_estimate = math.ceil(
estimated_dataset_bytes / num_partitions
)
# Estimate of object store memory required to accommodate all partitions
# handled by a single aggregator
aggregator_shuffle_object_store_memory_required: int = math.ceil(
estimated_dataset_bytes / num_aggregators
)
# Estimate of memory required to accommodate single partition as an output
# (inside Object Store)
output_object_store_memory_required: int = partition_byte_size_estimate
aggregator_total_memory_required: int = (
# Inputs (object store)
aggregator_shuffle_object_store_memory_required
+
# Output (object store)
output_object_store_memory_required
)
logger.info(
f"Estimated memory requirement for shuffling aggregator "
f"(partitions={num_partitions}, "
f"aggregators={num_aggregators}, "
f"dataset (estimate)={estimated_dataset_bytes / GiB:.1f}GiB): "
f"shuffle={aggregator_shuffle_object_store_memory_required / MiB:.1f}MiB, "
f"output={output_object_store_memory_required / MiB:.1f}MiB, "
f"total={aggregator_total_memory_required / MiB:.1f}MiB, "
)
return aggregator_total_memory_required
@dataclass
| HashShuffleOperator |
python | huggingface__transformers | src/transformers/models/siglip/processing_siglip.py | {
"start": 702,
"end": 1424
} | class ____(ProcessorMixin):
r"""
Constructs a Siglip processor which wraps a Siglip image processor and a Siglip tokenizer into a single processor.
[`SiglipProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`SiglipTokenizer`]. See the
[`~SiglipProcessor.__call__`] and [`~SiglipProcessor.decode`] for more information.
Args:
image_processor ([`SiglipImageProcessor`]):
The image processor is a required input.
tokenizer ([`SiglipTokenizer`]):
The tokenizer is a required input.
"""
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
__all__ = ["SiglipProcessor"]
| SiglipProcessor |
python | getsentry__sentry | src/sentry/notifications/platform/email/provider.py | {
"start": 5994,
"end": 6586
} | class ____(NotificationProvider[EmailRenderable]):
key = NotificationProviderKey.EMAIL
default_renderer = EmailRenderer
target_class = GenericNotificationTarget
target_resource_types = [NotificationTargetResourceType.EMAIL]
@classmethod
def is_available(cls, *, organization: RpcOrganizationSummary | None = None) -> bool:
return True
@classmethod
def send(cls, *, target: NotificationTarget, renderable: EmailRenderable) -> None:
email = renderable
email.to = [target.resource_id]
send_messages([email])
| EmailNotificationProvider |
python | ethereum__web3.py | web3/_utils/module_testing/eth_module.py | {
"start": 4647,
"end": 102626
} | class ____:
@pytest.mark.asyncio
async def test_eth_gas_price(self, async_w3: "AsyncWeb3[Any]") -> None:
gas_price = await async_w3.eth.gas_price
assert gas_price > 0
@pytest.mark.asyncio
async def test_is_connected(self, async_w3: "AsyncWeb3[Any]") -> None:
is_connected = await async_w3.is_connected()
assert is_connected is True
@pytest.mark.asyncio
async def test_eth_send_transaction_legacy(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"gasPrice": await async_w3.eth.gas_price,
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert is_same_address(txn["from"], cast(ChecksumAddress, txn_params["from"]))
assert is_same_address(txn["to"], cast(ChecksumAddress, txn_params["to"]))
assert txn["value"] == 1
assert txn["gas"] == 21000
assert txn["gasPrice"] == txn_params["gasPrice"]
@pytest.mark.asyncio
async def test_eth_modify_transaction_legacy(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"gasPrice": async_w3.to_wei(
1, "gwei"
), # must be greater than base_fee post London
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
modified_txn_hash = await async_w3.eth.modify_transaction(
txn_hash, gasPrice=(cast(Wei, txn_params["gasPrice"] * 2)), value=Wei(2)
)
modified_txn = await async_w3.eth.get_transaction(modified_txn_hash)
assert is_same_address(
modified_txn["from"], cast(ChecksumAddress, txn_params["from"])
)
assert is_same_address(
modified_txn["to"], cast(ChecksumAddress, txn_params["to"])
)
assert modified_txn["value"] == 2
assert modified_txn["gas"] == 21000
assert modified_txn["gasPrice"] == cast(int, txn_params["gasPrice"]) * 2
@pytest.mark.asyncio
async def test_eth_modify_transaction(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
"maxFeePerGas": async_w3.to_wei(2, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
modified_txn_hash = await async_w3.eth.modify_transaction(
txn_hash,
value=Wei(2),
maxPriorityFeePerGas=(cast(Wei, txn_params["maxPriorityFeePerGas"] * 2)),
maxFeePerGas=(cast(Wei, txn_params["maxFeePerGas"] * 2)),
)
modified_txn = await async_w3.eth.get_transaction(modified_txn_hash)
assert is_same_address(
modified_txn["from"], cast(ChecksumAddress, txn_params["from"])
)
assert is_same_address(
modified_txn["to"], cast(ChecksumAddress, txn_params["to"])
)
assert modified_txn["value"] == 2
assert modified_txn["gas"] == 21000
assert (
modified_txn["maxPriorityFeePerGas"]
== cast(Wei, txn_params["maxPriorityFeePerGas"]) * 2
)
assert modified_txn["maxFeePerGas"] == cast(Wei, txn_params["maxFeePerGas"]) * 2
@pytest.mark.asyncio
async def test_async_eth_sign_transaction(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(2, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
"nonce": Nonce(0),
}
result = await async_w3.eth.sign_transaction(txn_params)
signatory_account = async_w3.eth.account.recover_transaction(result["raw"])
assert async_keyfile_account_address_dual_type == signatory_account
assert result["tx"]["to"] == txn_params["to"]
assert result["tx"]["value"] == txn_params["value"]
assert result["tx"]["gas"] == txn_params["gas"]
assert result["tx"]["maxFeePerGas"] == txn_params["maxFeePerGas"]
assert (
result["tx"]["maxPriorityFeePerGas"] == txn_params["maxPriorityFeePerGas"]
)
assert result["tx"]["nonce"] == txn_params["nonce"]
@pytest.mark.asyncio
async def test_eth_sign_typed_data(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
async_skip_if_testrpc: Callable[["AsyncWeb3[Any]"], None],
) -> None:
validJSONMessage = """
{
"types": {
"EIP712Domain": [
{"name": "name", "type": "string"},
{"name": "version", "type": "string"},
{"name": "chainId", "type": "uint256"},
{"name": "verifyingContract", "type": "address"}
],
"Person": [
{"name": "name", "type": "string"},
{"name": "wallet", "type": "address"}
],
"Mail": [
{"name": "from", "type": "Person"},
{"name": "to", "type": "Person"},
{"name": "contents", "type": "string"}
]
},
"primaryType": "Mail",
"domain": {
"name": "Ether Mail",
"version": "1",
"chainId": "0x01",
"verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
},
"message": {
"from": {
"name": "Cow",
"wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
},
"to": {
"name": "Bob",
"wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
},
"contents": "Hello, Bob!"
}
}
"""
async_skip_if_testrpc(async_w3)
signature = HexBytes(
await async_w3.eth.sign_typed_data(
async_keyfile_account_address_dual_type, json.loads(validJSONMessage)
)
)
assert len(signature) == 32 + 32 + 1
@pytest.mark.asyncio
async def test_invalid_eth_sign_typed_data(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
async_skip_if_testrpc: Callable[["AsyncWeb3[Any]"], None],
) -> None:
async_skip_if_testrpc(async_w3)
invalid_typed_message = """
{
"types": {
"EIP712Domain": [
{"name": "name", "type": "string"},
{"name": "version", "type": "string"},
{"name": "chainId", "type": "uint256"},
{"name": "verifyingContract", "type": "address"}
],
"Person": [
{"name": "name", "type": "string"},
{"name": "wallet", "type": "address"}
],
"Mail": [
{"name": "from", "type": "Person"},
{"name": "to", "type": "Person[2]"},
{"name": "contents", "type": "string"}
]
},
"primaryType": "Mail",
"domain": {
"name": "Ether Mail",
"version": "1",
"chainId": "0x01",
"verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
},
"message": {
"from": {
"name": "Cow",
"wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
},
"to": [{
"name": "Bob",
"wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
}],
"contents": "Hello, Bob!"
}
}
"""
with pytest.raises(
Web3ValueError,
match=r".*Expected 2 items for array type Person\[2\], got 1 items.*",
):
await async_w3.eth.sign_typed_data(
async_keyfile_account_address_dual_type,
json.loads(invalid_typed_message),
)
@pytest.mark.asyncio
async def test_async_eth_sign_transaction_legacy(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address,
"to": async_keyfile_account_address,
"value": Wei(1),
"gas": 21000,
"gasPrice": await async_w3.eth.gas_price,
"nonce": Nonce(0),
}
result = await async_w3.eth.sign_transaction(txn_params)
signatory_account = async_w3.eth.account.recover_transaction(result["raw"])
assert async_keyfile_account_address == signatory_account
assert result["tx"]["to"] == txn_params["to"]
assert result["tx"]["value"] == txn_params["value"]
assert result["tx"]["gas"] == txn_params["gas"]
assert result["tx"]["gasPrice"] == txn_params["gasPrice"]
assert result["tx"]["nonce"] == txn_params["nonce"]
@pytest.mark.asyncio
async def test_async_eth_sign_transaction_hex_fees(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address,
"to": async_keyfile_account_address,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": hex(async_w3.to_wei(2, "gwei")),
"maxPriorityFeePerGas": hex(async_w3.to_wei(1, "gwei")),
"nonce": Nonce(0),
}
result = await async_w3.eth.sign_transaction(txn_params)
signatory_account = async_w3.eth.account.recover_transaction(result["raw"])
assert async_keyfile_account_address == signatory_account
assert result["tx"]["to"] == txn_params["to"]
assert result["tx"]["value"] == txn_params["value"]
assert result["tx"]["gas"] == txn_params["gas"]
assert result["tx"]["maxFeePerGas"] == int(str(txn_params["maxFeePerGas"]), 16)
assert result["tx"]["maxPriorityFeePerGas"] == int(
str(txn_params["maxPriorityFeePerGas"]), 16
)
assert result["tx"]["nonce"] == txn_params["nonce"]
@pytest.mark.asyncio
async def test_async_eth_sign_transaction_ens_names(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
with ens_addresses(
async_w3, {"unlocked-account.eth": async_keyfile_account_address}
):
txn_params: TxParams = {
"from": "unlocked-account.eth",
"to": "unlocked-account.eth",
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(2, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
"nonce": Nonce(0),
}
result = await async_w3.eth.sign_transaction(txn_params)
signatory_account = async_w3.eth.account.recover_transaction(result["raw"])
assert async_keyfile_account_address == signatory_account
assert result["tx"]["to"] == async_keyfile_account_address
assert result["tx"]["value"] == txn_params["value"]
assert result["tx"]["gas"] == txn_params["gas"]
assert result["tx"]["maxFeePerGas"] == txn_params["maxFeePerGas"]
assert (
result["tx"]["maxPriorityFeePerGas"]
== txn_params["maxPriorityFeePerGas"]
)
assert result["tx"]["nonce"] == txn_params["nonce"]
@pytest.mark.asyncio
async def test_eth_send_transaction(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(3, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert is_same_address(txn["from"], cast(ChecksumAddress, txn_params["from"]))
assert is_same_address(txn["to"], cast(ChecksumAddress, txn_params["to"]))
assert txn["value"] == 1
assert txn["gas"] == 21000
assert txn["maxFeePerGas"] == txn_params["maxFeePerGas"]
assert txn["maxPriorityFeePerGas"] == txn_params["maxPriorityFeePerGas"]
assert txn["gasPrice"] <= txn["maxFeePerGas"] # effective gas price
@pytest.mark.asyncio
async def test_eth_send_transaction_default_fees(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert is_same_address(txn["from"], cast(ChecksumAddress, txn_params["from"]))
assert is_same_address(txn["to"], cast(ChecksumAddress, txn_params["to"]))
assert txn["value"] == 1
assert txn["gas"] == 21000
assert is_integer(txn["maxPriorityFeePerGas"])
assert is_integer(txn["maxFeePerGas"])
assert txn["gasPrice"] <= txn["maxFeePerGas"] # effective gas price
@pytest.mark.asyncio
async def test_eth_send_transaction_hex_fees(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": hex(250 * 10**9),
"maxPriorityFeePerGas": hex(2 * 10**9),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert is_same_address(txn["from"], cast(ChecksumAddress, txn_params["from"]))
assert is_same_address(txn["to"], cast(ChecksumAddress, txn_params["to"]))
assert txn["value"] == 1
assert txn["gas"] == 21000
assert txn["maxFeePerGas"] == 250 * 10**9
assert txn["maxPriorityFeePerGas"] == 2 * 10**9
@pytest.mark.asyncio
async def test_eth_send_transaction_no_gas(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"maxFeePerGas": Wei(250 * 10**9),
"maxPriorityFeePerGas": Wei(2 * 10**9),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert is_same_address(txn["from"], cast(ChecksumAddress, txn_params["from"]))
assert is_same_address(txn["to"], cast(ChecksumAddress, txn_params["to"]))
assert txn["value"] == 1
assert txn["gas"] == 121000 # 21000 + buffer
@pytest.mark.asyncio
async def test_eth_send_transaction_with_gas_price(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"gasPrice": Wei(1),
"maxFeePerGas": Wei(250 * 10**9),
"maxPriorityFeePerGas": Wei(2 * 10**9),
}
with pytest.raises(TransactionTypeMismatch):
await async_w3.eth.send_transaction(txn_params)
@pytest.mark.asyncio
async def test_eth_send_transaction_no_priority_fee(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": Wei(250 * 10**9),
}
with pytest.raises(
InvalidTransaction, match="maxPriorityFeePerGas must be defined"
):
await async_w3.eth.send_transaction(txn_params)
@pytest.mark.asyncio
async def test_eth_send_transaction_no_max_fee(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
maxPriorityFeePerGas = async_w3.to_wei(2, "gwei")
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxPriorityFeePerGas": maxPriorityFeePerGas,
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert is_same_address(txn["from"], cast(ChecksumAddress, txn_params["from"]))
assert is_same_address(txn["to"], cast(ChecksumAddress, txn_params["to"]))
assert txn["value"] == 1
assert txn["gas"] == 21000
@pytest.mark.asyncio
async def test_eth_send_transaction_max_fee_less_than_tip(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": Wei(1 * 10**9),
"maxPriorityFeePerGas": Wei(2 * 10**9),
}
with pytest.raises(
InvalidTransaction, match="maxFeePerGas must be >= maxPriorityFeePerGas"
):
await async_w3.eth.send_transaction(txn_params)
@pytest.mark.asyncio
async def test_validation_middleware_chain_id_mismatch(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
wrong_chain_id = 1234567890
actual_chain_id = await async_w3.eth.chain_id
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(2, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
"chainId": wrong_chain_id,
}
with pytest.raises(
Web3ValidationError,
match=f"The transaction declared chain ID {wrong_chain_id}, "
f"but the connected node is on {actual_chain_id}",
):
await async_w3.eth.send_transaction(txn_params)
@pytest.mark.asyncio
async def test_ExtraDataToPOAMiddleware(
self, async_w3: "AsyncWeb3[Any]", request_mocker: type[RequestMocker]
) -> None:
async_w3.middleware_onion.inject(ExtraDataToPOAMiddleware, "poa", layer=0)
extra_data = f"0x{'ff' * 33}"
async with request_mocker(
async_w3,
mock_results={"eth_getBlockByNumber": {"extraData": extra_data}},
):
block = await async_w3.eth.get_block("latest")
assert "extraData" not in block
assert block["proofOfAuthorityData"] == to_bytes(hexstr=extra_data)
# clean up
async_w3.middleware_onion.remove("poa")
@pytest.mark.asyncio
async def test_async_eth_send_raw_transaction(
self, async_w3: "AsyncWeb3[Any]", keyfile_account_pkey: HexStr
) -> None:
keyfile_account = async_w3.eth.account.from_key(keyfile_account_pkey)
txn = {
"chainId": 131277322940537, # the chainId set for the fixture
"from": keyfile_account.address,
"to": keyfile_account.address,
"value": Wei(0),
"gas": 21000,
"nonce": await async_w3.eth.get_transaction_count(
keyfile_account.address, "pending"
),
"gasPrice": 10**9,
}
signed = keyfile_account.sign_transaction(txn)
txn_hash = await async_w3.eth.send_raw_transaction(signed.raw_transaction)
assert txn_hash == HexBytes(signed.hash)
@pytest.mark.asyncio
async def test_async_sign_and_send_raw_middleware(
self, async_w3: "AsyncWeb3[Any]", keyfile_account_pkey: HexStr
) -> None:
keyfile_account = async_w3.eth.account.from_key(keyfile_account_pkey)
txn: TxParams = {
"from": keyfile_account.address,
"to": keyfile_account.address,
"value": Wei(0),
"gas": 21000,
}
async_w3.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(keyfile_account), "signing", layer=0
)
txn_hash = await async_w3.eth.send_transaction(txn)
assert isinstance(txn_hash, HexBytes)
# clean up
async_w3.middleware_onion.remove("signing")
@pytest.mark.asyncio
async def test_async_sign_authorization_send_raw_and_send_set_code_transactions(
self,
async_w3: "AsyncWeb3[Any]",
keyfile_account_pkey: HexStr,
async_math_contract: "AsyncContract",
) -> None:
keyfile_account = async_w3.eth.account.from_key(keyfile_account_pkey)
chain_id = await async_w3.eth.chain_id
nonce = await async_w3.eth.get_transaction_count(keyfile_account.address)
auth = {
"chainId": chain_id,
"address": async_math_contract.address,
"nonce": nonce + 1,
}
signed_auth = keyfile_account.sign_authorization(auth)
# get current math counter and increase it only in the delegation by n
math_counter = await async_math_contract.functions.counter().call()
data = async_math_contract.encode_abi("incrementCounter", [math_counter + 1337])
txn: TxParams = {
"chainId": chain_id,
"to": keyfile_account.address,
"value": Wei(0),
"gas": 200_000,
"nonce": nonce,
"maxPriorityFeePerGas": Wei(10**9),
"maxFeePerGas": Wei(10**9),
"data": data,
"authorizationList": [signed_auth],
}
# test eth_sendRawTransaction
signed = keyfile_account.sign_transaction(txn)
tx_hash = await async_w3.eth.send_raw_transaction(signed.raw_transaction)
get_tx = await async_w3.eth.get_transaction(tx_hash)
await async_w3.eth.wait_for_transaction_receipt(tx_hash)
code = await async_w3.eth.get_code(keyfile_account.address)
assert code.to_0x_hex() == f"0xef0100{async_math_contract.address[2:].lower()}"
delegated = async_w3.eth.contract(
address=keyfile_account.address, abi=async_math_contract.abi
)
# assert the math counter is increased by 1337 only in delegated acct
assert await async_math_contract.functions.counter().call() == math_counter
delegated_call = await delegated.functions.counter().call(
block_identifier="latest"
)
assert delegated_call == math_counter + 1337
assert len(get_tx["authorizationList"]) == 1
get_auth = get_tx["authorizationList"][0]
assert get_auth["chainId"] == chain_id
assert get_auth["address"] == async_math_contract.address
assert get_auth["nonce"] == nonce + 1
assert isinstance(get_auth["yParity"], int)
assert isinstance(get_auth["r"], HexBytes)
assert isinstance(get_auth["s"], HexBytes)
# reset code
reset_auth = {
"chainId": chain_id,
"address": "0x" + ("00" * 20),
"nonce": nonce + 3,
}
signed_reset_auth = keyfile_account.sign_authorization(reset_auth)
reset_code_txn = merge(
txn,
{
"from": keyfile_account.address,
"authorizationList": [signed_reset_auth],
"nonce": nonce + 2,
},
)
# test eth_sendTransaction
reset_tx_hash = await async_w3.eth.send_transaction(reset_code_txn)
await async_w3.eth.wait_for_transaction_receipt(reset_tx_hash, timeout=10)
reset_code = await async_w3.eth.get_code(keyfile_account.address)
assert reset_code == HexBytes("0x")
@pytest.mark.asyncio
async def test_GasPriceStrategyMiddleware(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
}
two_gwei_in_wei = async_w3.to_wei(2, "gwei")
def gas_price_strategy(w3: "Web3", txn: TxParams) -> Wei:
return two_gwei_in_wei
async_w3.eth.set_gas_price_strategy(gas_price_strategy)
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert txn["gasPrice"] == two_gwei_in_wei
async_w3.eth.set_gas_price_strategy(None) # reset strategy
@pytest.mark.asyncio
async def test_gas_price_strategy_middleware_hex_value(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
}
two_gwei_in_wei = async_w3.to_wei(2, "gwei")
def gas_price_strategy(_w3: "Web3", _txn: TxParams) -> str:
return hex(two_gwei_in_wei)
async_w3.eth.set_gas_price_strategy(gas_price_strategy) # type: ignore
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
assert txn["gasPrice"] == two_gwei_in_wei
async_w3.eth.set_gas_price_strategy(None) # reset strategy
@pytest.mark.asyncio
@pytest.mark.parametrize(
"max_fee", (1000000000, None), ids=["with_max_fee", "without_max_fee"]
)
async def test_gas_price_from_strategy_bypassed_for_dynamic_fee_txn(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
max_fee: Wei,
) -> None:
max_priority_fee = async_w3.to_wei(1, "gwei")
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxPriorityFeePerGas": max_priority_fee,
}
if max_fee is not None:
txn_params = assoc(txn_params, "maxFeePerGas", max_fee)
def gas_price_strategy(w3: "Web3", txn: TxParams) -> Wei:
return async_w3.to_wei(2, "gwei")
async_w3.eth.set_gas_price_strategy(gas_price_strategy)
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
latest_block = await async_w3.eth.get_block("latest")
assert (
txn["maxFeePerGas"] == max_fee
if max_fee is not None
else 2 * latest_block["baseFeePerGas"] + max_priority_fee
)
assert txn["maxPriorityFeePerGas"] == max_priority_fee
assert txn["gasPrice"] <= txn["maxFeePerGas"] # effective gas price
async_w3.eth.set_gas_price_strategy(None) # reset strategy
@pytest.mark.asyncio
async def test_gas_price_from_strategy_bypassed_for_dynamic_fee_txn_no_tip(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": Wei(1000000000),
}
def gas_price_strategy(_w3: "Web3", _txn: TxParams) -> Wei:
return async_w3.to_wei(2, "gwei")
async_w3.eth.set_gas_price_strategy(gas_price_strategy)
with pytest.raises(
InvalidTransaction, match="maxPriorityFeePerGas must be defined"
):
await async_w3.eth.send_transaction(txn_params)
async_w3.eth.set_gas_price_strategy(None) # reset strategy
@pytest.mark.asyncio
async def test_eth_estimate_gas(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
gas_estimate = await async_w3.eth.estimate_gas(
{
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
}
)
assert is_integer(gas_estimate)
assert gas_estimate > 0
@pytest.mark.asyncio
@pytest.mark.parametrize(
"params",
(
{
"nonce": 1, # int
"balance": 1, # int
"code": HexStr("0x"), # HexStr
# with state
"state": {HexStr(f"0x{'00' * 32}"): HexStr(f"0x{'00' * 32}")},
},
{
"nonce": HexStr("0x1"), # HexStr
"balance": HexStr("0x1"), # HexStr
"code": b"\x00", # bytes
# with stateDiff
"stateDiff": {HexStr(f"0x{'00' * 32}"): HexStr(f"0x{'00' * 32}")},
},
),
)
async def test_eth_estimate_gas_with_override_param_type_check(
self,
async_w3: "AsyncWeb3[Any]",
async_math_contract: "AsyncContract",
params: StateOverrideParams,
) -> None:
accounts = await async_w3.eth.accounts
txn_params: TxParams = {"from": accounts[0]}
# assert does not raise
await async_w3.eth.estimate_gas(
txn_params, None, {async_math_contract.address: params}
)
@pytest.mark.asyncio
async def test_eth_fee_history(self, async_w3: "AsyncWeb3[Any]") -> None:
fee_history = await async_w3.eth.fee_history(1, "latest", [50])
assert is_list_like(fee_history["baseFeePerGas"])
assert is_list_like(fee_history["gasUsedRatio"])
assert is_integer(fee_history["oldestBlock"])
assert fee_history["oldestBlock"] >= 0
assert is_list_like(fee_history["reward"])
if len(fee_history["reward"]) > 0:
assert is_list_like(fee_history["reward"][0])
@pytest.mark.asyncio
async def test_eth_fee_history_with_integer(
self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
) -> None:
fee_history = await async_w3.eth.fee_history(
1, async_empty_block["number"], [50]
)
assert is_list_like(fee_history["baseFeePerGas"])
assert is_list_like(fee_history["gasUsedRatio"])
assert is_integer(fee_history["oldestBlock"])
assert fee_history["oldestBlock"] >= 0
assert is_list_like(fee_history["reward"])
if len(fee_history["reward"]) > 0:
assert is_list_like(fee_history["reward"][0])
@pytest.mark.asyncio
async def test_eth_fee_history_no_reward_percentiles(
self, async_w3: "AsyncWeb3[Any]"
) -> None:
fee_history = await async_w3.eth.fee_history(1, "latest")
assert is_list_like(fee_history["baseFeePerGas"])
assert is_list_like(fee_history["gasUsedRatio"])
assert is_integer(fee_history["oldestBlock"])
assert fee_history["oldestBlock"] >= 0
@pytest.mark.asyncio
async def test_eth_max_priority_fee(self, async_w3: "AsyncWeb3[Any]") -> None:
max_priority_fee = await async_w3.eth.max_priority_fee
assert is_integer(max_priority_fee)
@pytest.mark.asyncio
async def test_eth_max_priority_fee_with_fee_history_calculation(
self, async_w3: "AsyncWeb3[Any]", request_mocker: type[RequestMocker]
) -> None:
async with request_mocker(
async_w3,
mock_errors={RPCEndpoint("eth_maxPriorityFeePerGas"): {}},
mock_results={RPCEndpoint("eth_feeHistory"): {"reward": [[0]]}},
):
with pytest.warns(
UserWarning,
match=(
"There was an issue with the method eth_maxPriorityFeePerGas. "
"Calculating using eth_feeHistory."
),
):
priority_fee = await async_w3.eth.max_priority_fee
assert is_integer(priority_fee)
assert priority_fee == PRIORITY_FEE_MIN
@pytest.mark.asyncio
async def test_eth_getBlockByHash(
self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
) -> None:
block = await async_w3.eth.get_block(async_empty_block["hash"])
assert block["hash"] == async_empty_block["hash"]
    @pytest.mark.asyncio
    async def test_eth_getBlockByHash_not_found(
        self, async_w3: "AsyncWeb3[Any]"
    ) -> None:
        """get_block with an unknown hash raises BlockNotFound."""
        with pytest.raises(BlockNotFound):
            await async_w3.eth.get_block(UNKNOWN_HASH)
    @pytest.mark.asyncio
    async def test_eth_getBlockByHash_pending(self, async_w3: "AsyncWeb3[Any]") -> None:
        """A pending block has no hash yet."""
        block = await async_w3.eth.get_block("pending")
        assert block["hash"] is None
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_with_integer(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """get_block accepts an integer block number."""
        block = await async_w3.eth.get_block(async_empty_block["number"])
        assert block["number"] == async_empty_block["number"]
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_latest(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """The 'latest' block identifier returns a mined (hashed) block."""
        block = await async_w3.eth.get_block("latest")
        assert block["hash"] is not None
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_not_found(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """get_block with a non-existent block number raises BlockNotFound."""
        with pytest.raises(BlockNotFound):
            await async_w3.eth.get_block(BlockNumber(12345))
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_pending(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """The 'pending' identifier returns an unmined block (hash is None)."""
        block = await async_w3.eth.get_block("pending")
        assert block["hash"] is None
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_earliest(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """The 'earliest' identifier resolves to the genesis block (number 0)."""
        genesis_block = await async_w3.eth.get_block(BlockNumber(0))
        block = await async_w3.eth.get_block("earliest")
        assert block["number"] == 0
        assert block["hash"] == genesis_block["hash"]
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_safe(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """The 'safe' block identifier is accepted and returns a numbered block."""
        block = await async_w3.eth.get_block("safe")
        assert block is not None
        assert isinstance(block["number"], int)
    @pytest.mark.asyncio
    async def test_eth_getBlockByNumber_finalized(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """The 'finalized' block identifier is accepted and returns a numbered block."""
        block = await async_w3.eth.get_block("finalized")
        assert block is not None
        assert isinstance(block["number"], int)
    @pytest.mark.asyncio
    async def test_eth_getBlockReceipts_hash(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """get_block_receipts accepts a block hash and returns a list."""
        receipts = await async_w3.eth.get_block_receipts(async_empty_block["hash"])
        assert isinstance(receipts, list)
    @pytest.mark.asyncio
    async def test_eth_getBlockReceipts_not_found(
        self, async_w3: "AsyncWeb3[Any]"
    ) -> None:
        """get_block_receipts with an unknown block hash raises BlockNotFound."""
        with pytest.raises(BlockNotFound):
            await async_w3.eth.get_block_receipts(UNKNOWN_HASH)
    @pytest.mark.asyncio
    async def test_eth_getBlockReceipts_with_integer(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """get_block_receipts accepts an integer block number."""
        receipts = await async_w3.eth.get_block_receipts(async_empty_block["number"])
        assert isinstance(receipts, list)
    @pytest.mark.asyncio
    async def test_eth_getBlockReceipts_safe(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """get_block_receipts accepts the 'safe' block identifier."""
        receipts = await async_w3.eth.get_block_receipts("safe")
        assert isinstance(receipts, list)
    @pytest.mark.asyncio
    async def test_eth_getBlockReceipts_finalized(
        self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
    ) -> None:
        """get_block_receipts accepts the 'finalized' block identifier."""
        receipts = await async_w3.eth.get_block_receipts("finalized")
        assert isinstance(receipts, list)
    @pytest.mark.asyncio
    async def test_eth_get_block_by_number_full_transactions(
        self, async_w3: "AsyncWeb3[Any]", async_block_with_txn: BlockData
    ) -> None:
        """With full_transactions=True, block transactions are full TxData dicts."""
        block = await async_w3.eth.get_block(async_block_with_txn["number"], True)
        transaction = cast(TxData, block["transactions"][0])
        assert transaction["hash"] == async_block_with_txn["transactions"][0]
    @pytest.mark.asyncio
    async def test_eth_get_raw_transaction(
        self, async_w3: "AsyncWeb3[Any]", mined_txn_hash: HexStr
    ) -> None:
        """get_raw_transaction returns the raw RLP-encoded bytes of a mined txn."""
        raw_transaction = await async_w3.eth.get_raw_transaction(mined_txn_hash)
        assert is_bytes(raw_transaction)
    @pytest.mark.asyncio
    async def test_eth_get_raw_transaction_raises_error(
        self, async_w3: "AsyncWeb3[Any]"
    ) -> None:
        """get_raw_transaction raises TransactionNotFound for an unknown hash."""
        with pytest.raises(
            TransactionNotFound, match=f"Transaction with hash: '{UNKNOWN_HASH}'"
        ):
            await async_w3.eth.get_raw_transaction(UNKNOWN_HASH)
    @pytest.mark.asyncio
    async def test_eth_get_raw_transaction_by_block(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn: BlockData,
        async_keyfile_account_address_dual_type: ChecksumAddress,
    ) -> None:
        """get_raw_transaction_by_block works for 'latest', an integer block
        number, and a block hash identifier."""
        # eth_getRawTransactionByBlockNumberAndIndex: block identifier
        await async_w3.eth.send_transaction(
            {
                "from": async_keyfile_account_address_dual_type,
                "to": async_keyfile_account_address_dual_type,
                "value": Wei(1),
            }
        )

        # Poll until the transaction above is included in the latest block;
        # bounded by the asyncio.wait_for timeout below.
        async def wait_for_block_with_txn() -> HexBytes:
            while True:
                try:
                    return await async_w3.eth.get_raw_transaction_by_block("latest", 0)
                except TransactionNotFound:
                    await asyncio.sleep(0.1)
                    continue

        raw_txn = await asyncio.wait_for(wait_for_block_with_txn(), timeout=5)
        assert is_bytes(raw_txn)

        # eth_getRawTransactionByBlockNumberAndIndex: block number
        async_block_with_txn_number = async_block_with_txn["number"]
        raw_transaction = await async_w3.eth.get_raw_transaction_by_block(
            async_block_with_txn_number, 0
        )
        assert is_bytes(raw_transaction)

        # eth_getRawTransactionByBlockHashAndIndex: block hash
        async_block_with_txn_hash = async_block_with_txn["hash"]
        raw_transaction = await async_w3.eth.get_raw_transaction_by_block(
            async_block_with_txn_hash, 0
        )
        assert is_bytes(raw_transaction)
    @pytest.mark.asyncio
    @pytest.mark.parametrize("unknown_block_num_or_hash", (1234567899999, UNKNOWN_HASH))
    async def test_eth_get_raw_transaction_by_block_raises_error(
        self,
        async_w3: "AsyncWeb3[Any]",
        unknown_block_num_or_hash: int | HexBytes,
    ) -> None:
        """Unknown block number or hash raises TransactionNotFound with a
        message identifying the index and block id."""
        with pytest.raises(
            TransactionNotFound,
            match=(
                f"Transaction index: 0 on block id: "
                f"{to_hex_if_integer(unknown_block_num_or_hash)!r} "
                f"not found."
            ),
        ):
            await async_w3.eth.get_raw_transaction_by_block(
                unknown_block_num_or_hash, 0
            )
    @pytest.mark.asyncio
    async def test_eth_get_raw_transaction_by_block_raises_error_block_identifier(
        self, async_w3: "AsyncWeb3[Any]"
    ) -> None:
        """An unrecognized string block identifier raises Web3ValueError."""
        unknown_identifier = "unknown"
        with pytest.raises(
            Web3ValueError,
            match=(
                "Value did not match any of the recognized block identifiers: "
                f"{unknown_identifier}"
            ),
        ):
            # type ignored because we are testing an invalid block identifier
            await async_w3.eth.get_raw_transaction_by_block(unknown_identifier, 0)  # type: ignore # noqa: E501
    @pytest.mark.asyncio
    async def test_eth_get_balance(self, async_w3: "AsyncWeb3[Any]") -> None:
        """get_balance rejects non-checksummed addresses and returns a
        non-negative integer for a valid one."""
        accounts = await async_w3.eth.accounts
        account = accounts[0]
        # lowercasing breaks the EIP-55 checksum, which must be rejected
        with pytest.raises(InvalidAddress):
            await async_w3.eth.get_balance(
                ChecksumAddress(HexAddress(HexStr(account.lower())))
            )
        balance = await async_w3.eth.get_balance(account)
        assert is_integer(balance)
        assert balance >= 0
    @pytest.mark.asyncio
    async def test_eth_get_code(
        self, async_w3: "AsyncWeb3[Any]", async_math_contract_address: ChecksumAddress
    ) -> None:
        """get_code returns non-empty bytecode for a deployed contract."""
        code = await async_w3.eth.get_code(async_math_contract_address)
        assert isinstance(code, HexBytes)
        assert len(code) > 0
    @pytest.mark.asyncio
    async def test_eth_get_code_invalid_address(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_math_contract: "AsyncContract",
    ) -> None:
        """get_code rejects an address with a broken EIP-55 checksum."""
        with pytest.raises(InvalidAddress):
            await async_w3.eth.get_code(
                ChecksumAddress(HexAddress(HexStr(async_math_contract.address.lower())))
            )
    @pytest.mark.asyncio
    async def test_eth_get_code_with_block_identifier(
        self, async_w3: "AsyncWeb3[Any]", async_emitter_contract: "AsyncContract"
    ) -> None:
        """get_code accepts an explicit block number identifier."""
        block_id = await async_w3.eth.block_number
        code = await async_w3.eth.get_code(async_emitter_contract.address, block_id)
        assert isinstance(code, HexBytes)
        assert len(code) > 0
    @pytest.mark.asyncio
    async def test_eth_create_access_list(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_keyfile_account_address_dual_type: ChecksumAddress,
        async_math_contract: "AsyncContract",
    ) -> None:
        """eth_createAccessList returns a usable access list for a contract call,
        and the result can be fed back into a transaction dict."""
        # build txn
        txn = await async_math_contract.functions.incrementCounter(1).build_transaction(
            {"from": async_keyfile_account_address_dual_type}
        )

        # create access list
        response = await async_w3.eth.create_access_list(txn)

        assert is_dict(response)
        access_list = response["accessList"]
        assert len(access_list) > 0
        assert access_list[0]["address"] is not None
        assert is_checksum_address(access_list[0]["address"])
        # storage keys are 32-byte hex strings: "0x" + 64 hex chars
        assert len(access_list[0]["storageKeys"][0]) == 66
        assert int(response["gasUsed"]) >= 0

        # assert the result can be used directly in a transaction dict
        txn["accessList"] = response["accessList"]
        txn["gas"] = response["gasUsed"]

        # send txn with access list
        await async_w3.eth.send_transaction(txn)
    @pytest.mark.asyncio
    async def test_eth_get_transaction_count(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_keyfile_account_address_dual_type: ChecksumAddress,
    ) -> None:
        """get_transaction_count returns a non-negative integer nonce."""
        transaction_count = await async_w3.eth.get_transaction_count(
            async_keyfile_account_address_dual_type
        )
        assert is_integer(transaction_count)
        assert transaction_count >= 0
    @pytest.mark.asyncio
    async def test_eth_call(
        self, async_w3: "AsyncWeb3[Any]", async_math_contract: "AsyncContract"
    ) -> None:
        """eth_call against the math contract's add(7, 11) decodes to 18."""
        accounts = await async_w3.eth.accounts
        account = accounts[0]
        txn_params = async_math_contract._prepare_transaction(
            abi_element_identifier="add",
            fn_args=(7, 11),
            transaction={"from": account, "to": async_math_contract.address},
        )
        call_result = await async_w3.eth.call(txn_params)
        assert is_string(call_result)
        (result,) = async_w3.codec.decode(["uint256"], call_result)
        assert result == 18
    @pytest.mark.asyncio
    async def test_eth_call_with_override_code(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_revert_contract: "AsyncContract",
    ) -> None:
        """State overrides can replace a contract's runtime bytecode for the
        duration of an eth_call, accepting both HexStr and bytes code values."""
        accounts = await async_w3.eth.accounts
        account = accounts[0]
        txn_params = async_revert_contract._prepare_transaction(
            abi_element_identifier="normalFunction",
            transaction={"from": account, "to": async_revert_contract.address},
        )
        # without overrides, normalFunction returns True
        call_result = await async_w3.eth.call(txn_params)
        (result,) = async_w3.codec.decode(["bool"], call_result)
        assert result is True

        # override runtime bytecode: `normalFunction` returns `false`
        override_code = HexStr(
            "0x6080604052348015600f57600080fd5b5060043610603c5760003560e01c8063185c38a4146041578063c06a97cb146049578063d67e4b84146051575b600080fd5b60476071565b005b604f60df565b005b605760e4565b604051808215151515815260200191505060405180910390f35b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f46756e6374696f6e20686173206265656e2072657665727465642e000000000081525060200191505060405180910390fd5b600080fd5b60008090509056fea2646970667358221220bb71e9e9a2e271cd0fbe833524a3ea67df95f25ea13aef5b0a761fa52b538f1064736f6c63430006010033"  # noqa: E501
        )
        call_result = await async_w3.eth.call(
            txn_params,
            "latest",
            {async_revert_contract.address: {"code": override_code}},
        )
        (result,) = async_w3.codec.decode(["bool"], call_result)
        assert result is False

        # test bytes — the override code may also be provided as raw bytes
        bytes_call_result = await async_w3.eth.call(
            txn_params,
            "latest",
            {async_revert_contract.address: {"code": to_bytes(hexstr=override_code)}},
        )
        (bytes_result,) = async_w3.codec.decode(["bool"], bytes_call_result)
        assert bytes_result is False
    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "params",
        (
            {
                "nonce": 1,  # int
                "balance": 1,  # int
                "code": HexStr("0x"),  # HexStr
                # with state
                "state": {HexStr(f"0x{'00' * 32}"): HexStr(f"0x{'00' * 32}")},
            },
            {
                "nonce": HexStr("0x1"),  # HexStr
                "balance": HexStr("0x1"),  # HexStr
                "code": b"\x00",  # bytes
                # with stateDiff
                "stateDiff": {HexStr(f"0x{'00' * 32}"): HexStr(f"0x{'00' * 32}")},
            },
        ),
    )
    async def test_eth_call_with_override_param_type_check(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_math_contract: "AsyncContract",
        params: StateOverrideParams,
    ) -> None:
        """State-override fields accept int, HexStr, and bytes value types."""
        accounts = await async_w3.eth.accounts
        txn_params: TxParams = {"from": accounts[0]}

        # assert does not raise
        await async_w3.eth.call(
            txn_params, "latest", {async_math_contract.address: params}
        )
    @pytest.mark.asyncio
    async def test_eth_call_with_0_result(
        self, async_w3: "AsyncWeb3[Any]", async_math_contract: "AsyncContract"
    ) -> None:
        """eth_call decoding handles a zero-valued return (add(0, 0) == 0)."""
        accounts = await async_w3.eth.accounts
        txn_params = async_math_contract._prepare_transaction(
            abi_element_identifier="add",
            fn_args=(0, 0),
            transaction={"from": accounts[0], "to": async_math_contract.address},
        )
        call_result = await async_w3.eth.call(txn_params)
        assert is_string(call_result)
        (result,) = async_w3.codec.decode(["uint256"], call_result)
        assert result == 0
    @pytest.mark.asyncio
    async def test_eth_call_revert_with_msg(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_revert_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
    ) -> None:
        """A revert with a reason string raises ContractLogicError carrying it."""
        txn_params = async_revert_contract._prepare_transaction(
            abi_element_identifier="revertWithMessage",
            transaction={
                "from": async_keyfile_account_address,
                "to": async_revert_contract.address,
            },
        )
        with pytest.raises(
            ContractLogicError, match="execution reverted: Function has been reverted"
        ):
            await async_w3.eth.call(txn_params)
    @pytest.mark.asyncio
    async def test_eth_call_revert_without_msg(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_revert_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
    ) -> None:
        """A revert without a reason string still raises ContractLogicError."""
        with pytest.raises(ContractLogicError, match="execution reverted"):
            txn_params = async_revert_contract._prepare_transaction(
                abi_element_identifier="revertWithoutMessage",
                transaction={
                    "from": async_keyfile_account_address,
                    "to": async_revert_contract.address,
                },
            )
            await async_w3.eth.call(txn_params)
    @pytest.mark.asyncio
    async def test_eth_call_revert_custom_error_with_msg(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_revert_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
    ) -> None:
        """A custom error with arguments raises ContractCustomError whose
        message matches the ABI-encoded error data."""
        data = async_revert_contract.encode_abi(
            abi_element_identifier="UnauthorizedWithMessage",
            args=["You are not authorized"],
        )
        txn_params = async_revert_contract._prepare_transaction(
            abi_element_identifier="customErrorWithMessage",
            transaction={
                "from": async_keyfile_account_address,
                "to": async_revert_contract.address,
            },
        )
        with pytest.raises(ContractCustomError, match=data):
            await async_w3.eth.call(txn_params)
    @pytest.mark.asyncio
    async def test_eth_call_revert_custom_error_without_msg(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_revert_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
    ) -> None:
        """A parameterless custom error raises ContractCustomError whose
        message matches the encoded error selector."""
        data = async_revert_contract.encode_abi(abi_element_identifier="Unauthorized")
        txn_params = async_revert_contract._prepare_transaction(
            abi_element_identifier="customErrorWithoutMessage",
            transaction={
                "from": async_keyfile_account_address,
                "to": async_revert_contract.address,
            },
        )
        with pytest.raises(ContractCustomError, match=data):
            await async_w3.eth.call(txn_params)
    @pytest.mark.parametrize(
        "panic_error,params",
        (
            ("01", []),
            ("11", []),
            ("12", [0]),
            ("21", [-1]),
            ("22", []),
            ("31", []),
            ("32", []),
            ("41", []),
            ("51", []),
        ),
    )
    @pytest.mark.asyncio
    async def test_contract_panic_errors(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_panic_errors_contract: "AsyncContract",
        panic_error: str,
        params: list[Any],
    ) -> None:
        """Each Solidity panic code raises ContractPanicError with the
        message mapped in PANIC_ERROR_CODES."""
        # contract exposes one trigger function per panic code, e.g. errorCode11
        method = getattr(
            async_panic_errors_contract.functions,
            f"errorCode{panic_error}",
        )
        error_msg = PANIC_ERROR_CODES[panic_error]

        with pytest.raises(ContractPanicError, match=re.escape(error_msg)):
            await method(*params).call()
    @pytest.mark.asyncio
    async def test_eth_call_offchain_lookup(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
        monkeypatch: "MonkeyPatch",
    ) -> None:
        """CCIP-read (EIP-3668) resolves via a mocked gateway for both the
        contract caller and the function-call interfaces."""
        normalized_contract_address = to_hex_if_bytes(
            async_offchain_lookup_contract.address
        ).lower()
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            mocked_request_url=f"https://web3.py/gateway/{normalized_contract_address}/{OFFCHAIN_LOOKUP_TEST_DATA}.json",  # noqa: E501
            mocked_json_data=WEB3PY_AS_HEXBYTES,
        )
        response_caller = await async_offchain_lookup_contract.caller().testOffchainLookup(  # noqa: E501 type: ignore
            OFFCHAIN_LOOKUP_TEST_DATA
        )
        response_function_call = await async_offchain_lookup_contract.functions.testOffchainLookup(  # noqa: E501 type: ignore
            OFFCHAIN_LOOKUP_TEST_DATA
        ).call()
        assert async_w3.codec.decode(["string"], response_caller)[0] == "web3py"
        assert async_w3.codec.decode(["string"], response_function_call)[0] == "web3py"
    @pytest.mark.asyncio
    async def test_eth_call_offchain_lookup_raises_when_ccip_read_is_disabled(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
    ) -> None:
        """With CCIP read disabled (per-call or provider-global), the raw
        OffchainLookup revert is surfaced instead of being resolved."""
        return_data = (
            OFFCHAIN_LOOKUP_4BYTE_DATA
            + abi_encoded_offchain_lookup_contract_address(
                async_w3, async_offchain_lookup_contract
            )
            + OFFCHAIN_LOOKUP_RETURN_DATA
        )
        # test AsyncContractCaller
        with pytest.raises(OffchainLookup) as e:
            await async_offchain_lookup_contract.caller(
                ccip_read_enabled=False
            ).testOffchainLookup(OFFCHAIN_LOOKUP_TEST_DATA)
        assert e.value.data == return_data

        # test AsyncContractFunction call
        with pytest.raises(OffchainLookup) as excinfo:
            await async_offchain_lookup_contract.functions.testOffchainLookup(
                OFFCHAIN_LOOKUP_TEST_DATA
            ).call(ccip_read_enabled=False)
        assert excinfo.value.data == return_data

        # test global flag on the provider
        async_w3.provider.global_ccip_read_enabled = False

        with pytest.raises(OffchainLookup) as exc_info:
            await async_offchain_lookup_contract.functions.testOffchainLookup(  # noqa: E501 type: ignore
                OFFCHAIN_LOOKUP_TEST_DATA
            ).call()
        assert exc_info.value.data == return_data

        async_w3.provider.global_ccip_read_enabled = True  # cleanup
    @pytest.mark.asyncio
    async def test_eth_call_offchain_lookup_call_flag_overrides_provider_flag(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
        monkeypatch: "MonkeyPatch",
    ) -> None:
        """A per-call ccip_read_enabled=True takes precedence over a provider
        whose global CCIP-read flag is False."""
        normalized_contract_address = to_hex_if_bytes(
            async_offchain_lookup_contract.address
        ).lower()
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            mocked_request_url=f"https://web3.py/gateway/{normalized_contract_address}/{OFFCHAIN_LOOKUP_TEST_DATA}.json",  # noqa: E501
            mocked_json_data=WEB3PY_AS_HEXBYTES,
        )
        async_w3.provider.global_ccip_read_enabled = False

        response = await async_offchain_lookup_contract.functions.testOffchainLookup(
            OFFCHAIN_LOOKUP_TEST_DATA
        ).call(ccip_read_enabled=True)
        assert async_w3.codec.decode(["string"], response)[0] == "web3py"

        async_w3.provider.global_ccip_read_enabled = True  # cleanup
    @pytest.mark.asyncio
    @pytest.mark.parametrize("max_redirects", range(-1, 4))
    async def test_eth_call_offchain_lookup_raises_if_max_redirects_is_less_than_4(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
        max_redirects: int,
    ) -> None:
        """ccip_read_max_redirects values below 4 are rejected."""
        default_max_redirects = async_w3.provider.ccip_read_max_redirects

        async_w3.provider.ccip_read_max_redirects = max_redirects
        with pytest.raises(Web3ValueError, match="at least 4"):
            await async_offchain_lookup_contract.caller().testOffchainLookup(
                OFFCHAIN_LOOKUP_TEST_DATA
            )

        async_w3.provider.ccip_read_max_redirects = default_max_redirects  # cleanup
    @pytest.mark.asyncio
    async def test_eth_call_offchain_lookup_raises_for_improperly_formatted_rest_request_response(  # noqa: E501
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
        monkeypatch: "MonkeyPatch",
    ) -> None:
        """A gateway response missing the 'data' field raises Web3ValidationError."""
        normalized_contract_address = to_hex_if_bytes(
            async_offchain_lookup_contract.address
        ).lower()
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            mocked_request_url=f"https://web3.py/gateway/{normalized_contract_address}/{OFFCHAIN_LOOKUP_TEST_DATA}.json",  # noqa: E501
            mocked_json_data=WEB3PY_AS_HEXBYTES,
            # respond with the payload under the wrong key
            json_data_field="not_data",
        )
        with pytest.raises(Web3ValidationError, match="missing 'data' field"):
            await async_offchain_lookup_contract.caller().testOffchainLookup(
                OFFCHAIN_LOOKUP_TEST_DATA
            )
    @pytest.mark.asyncio
    @pytest.mark.parametrize("status_code_non_4xx_error", [100, 300, 500, 600])
    async def test_eth_call_offchain_lookup_tries_next_url_for_non_4xx_error_status_and_tests_POST(  # noqa: E501
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
        monkeypatch: "MonkeyPatch",
        status_code_non_4xx_error: int,
    ) -> None:
        """On a non-4xx gateway error the lookup falls through to the next
        URL, which here exercises the POST request path."""
        normalized_contract_address = to_hex_if_bytes(
            async_offchain_lookup_contract.address
        ).lower()

        # The next url in our test contract doesn't contain '{data}', triggering
        # the POST request logic. The idea here is to return a bad status for the
        # first url (GET) and a success status for the second call (POST) to test
        # both that we move on to the next url with non-4xx status and that the
        # POST logic is also working as expected.
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            mocked_request_url=f"https://web3.py/gateway/{normalized_contract_address}/{OFFCHAIN_LOOKUP_TEST_DATA}.json",  # noqa: E501
            mocked_status_code=status_code_non_4xx_error,
            mocked_json_data=WEB3PY_AS_HEXBYTES,
        )
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            http_method="POST",
            mocked_request_url="https://web3.py/gateway",
            mocked_status_code=200,
            mocked_json_data=WEB3PY_AS_HEXBYTES,
            sender=normalized_contract_address,
            calldata=OFFCHAIN_LOOKUP_TEST_DATA,
        )
        response = await async_offchain_lookup_contract.caller().testOffchainLookup(
            OFFCHAIN_LOOKUP_TEST_DATA
        )
        assert async_w3.codec.decode(["string"], response)[0] == "web3py"
    @pytest.mark.asyncio
    async def test_eth_call_offchain_lookup_calls_raise_for_status_for_4xx_status_code(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_offchain_lookup_contract: "AsyncContract",
        async_keyfile_account_address: ChecksumAddress,
        monkeypatch: "MonkeyPatch",
    ) -> None:
        """A 4xx gateway status triggers raise_for_status() (mock raises with a
        recognizable message) rather than trying the next URL."""
        normalized_contract_address = to_hex_if_bytes(
            async_offchain_lookup_contract.address
        ).lower()
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            mocked_request_url=f"https://web3.py/gateway/{normalized_contract_address}/{OFFCHAIN_LOOKUP_TEST_DATA}.json",  # noqa: E501
            mocked_status_code=randint(400, 499),
            mocked_json_data=WEB3PY_AS_HEXBYTES,
        )
        with pytest.raises(Exception, match="called raise_for_status\\(\\)"):
            await async_offchain_lookup_contract.caller().testOffchainLookup(
                OFFCHAIN_LOOKUP_TEST_DATA
            )
    @pytest.mark.asyncio
    async def test_eth_call_offchain_lookup_raises_when_all_supplied_urls_fail(
        self,
        async_offchain_lookup_contract: "AsyncContract",
    ) -> None:
        """When every gateway URL fails, MultipleFailedRequests is raised."""
        # GET and POST requests should fail since responses are not mocked
        with pytest.raises(
            MultipleFailedRequests, match="Offchain lookup failed for supplied urls"
        ):
            await async_offchain_lookup_contract.caller().testOffchainLookup(
                OFFCHAIN_LOOKUP_TEST_DATA
            )
    @pytest.mark.asyncio
    async def test_eth_call_continuous_offchain_lookup_raises_with_too_many_requests(
        self,
        async_offchain_lookup_contract: "AsyncContract",
        monkeypatch: "MonkeyPatch",
    ) -> None:
        """A contract that keeps issuing CCIP-read redirects eventually hits
        the redirect limit and raises TooManyRequests."""
        normalized_contract_address = to_hex_if_bytes(
            async_offchain_lookup_contract.address
        ).lower()
        async_mock_offchain_lookup_request_response(
            monkeypatch,
            mocked_request_url=f"https://web3.py/gateway/{normalized_contract_address}/0x.json",  # noqa: E501
        )
        with pytest.raises(TooManyRequests, match="Too many CCIP read redirects"):
            await async_offchain_lookup_contract.caller().continuousOffchainLookup()  # noqa: E501 type: ignore
    @pytest.mark.asyncio
    async def test_eth_simulate_v1(self, async_w3: "AsyncWeb3[Any]") -> None:
        """eth_simulateV1 executes a block-state call with block and state
        overrides and reports per-call status, logs, and gas usage."""
        simulate_result = await async_w3.eth.simulate_v1(
            {
                "blockStateCalls": [
                    {
                        "blockOverrides": {
                            "baseFeePerGas": Wei(10),
                        },
                        # fund the sender so the simulated call can pay for gas
                        "stateOverrides": {
                            "0xc100000000000000000000000000000000000000": {
                                "balance": Wei(500000000),
                            }
                        },
                        "calls": [
                            {
                                "from": "0xc100000000000000000000000000000000000000",
                                "to": "0xc100000000000000000000000000000000000000",
                                "maxFeePerGas": Wei(10),
                                "maxPriorityFeePerGas": Wei(10),
                            }
                        ],
                    }
                ],
                "validation": True,
                "traceTransfers": True,
            },
            "latest",
        )
        assert len(simulate_result) == 1
        result = simulate_result[0]
        # the block override above should be reflected in the result
        assert result.get("baseFeePerGas") == 10

        calls_result = result.get("calls")
        assert calls_result is not None
        assert len(calls_result) == 1

        call_entry = calls_result[0]
        assert all(
            key in call_entry for key in ("returnData", "logs", "gasUsed", "status")
        )
        assert call_entry["status"] == 1
        # a plain value transfer costs exactly the intrinsic 21000 gas (0x5208)
        assert call_entry["gasUsed"] == int("0x5208", 16)
    @pytest.mark.asyncio
    async def test_async_eth_chain_id(self, async_w3: "AsyncWeb3[Any]") -> None:
        """eth_chainId matches the fixture chain's configured id."""
        chain_id = await async_w3.eth.chain_id
        # chain id value from geth fixture genesis file
        assert chain_id == 131277322940537
    @pytest.mark.asyncio
    async def test_async_eth_get_transaction_receipt_mined(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn: BlockData,
        mined_txn_hash: HexStr,
    ) -> None:
        """A mined transaction's receipt carries the expected block linkage,
        checksummed addresses, and a positive effective gas price."""
        receipt = await async_w3.eth.get_transaction_receipt(mined_txn_hash)
        assert is_dict(receipt)
        assert receipt["blockNumber"] == async_block_with_txn["number"]
        assert receipt["blockHash"] == async_block_with_txn["hash"]
        assert receipt["transactionIndex"] == 0
        assert receipt["transactionHash"] == HexBytes(mined_txn_hash)
        assert is_checksum_address(receipt["to"])
        assert receipt["from"] is not None
        assert is_checksum_address(receipt["from"])

        effective_gas_price = receipt["effectiveGasPrice"]
        assert isinstance(effective_gas_price, int)
        assert effective_gas_price > 0
    @pytest.mark.asyncio
    async def test_async_eth_get_transaction_receipt_unmined(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_keyfile_account_address_dual_type: ChecksumAddress,
    ) -> None:
        """Requesting the receipt of a just-sent (unmined) txn raises
        TransactionNotFound."""
        txn_hash = await async_w3.eth.send_transaction(
            {
                "from": async_keyfile_account_address_dual_type,
                "to": async_keyfile_account_address_dual_type,
                "value": Wei(1),
                "gas": 21000,
                "maxFeePerGas": async_w3.to_wei(3, "gwei"),
                "maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
            }
        )
        with pytest.raises(TransactionNotFound):
            await async_w3.eth.get_transaction_receipt(txn_hash)
    @pytest.mark.asyncio
    async def test_async_eth_get_transaction_receipt_with_log_entry(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn_with_log: BlockData,
        async_emitter_contract: "AsyncContract",
        txn_hash_with_log: HexStr,
    ) -> None:
        """The receipt of a log-emitting txn contains exactly one log entry
        consistent with the containing block and emitter contract."""
        receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash_with_log)
        assert is_dict(receipt)
        assert receipt["blockNumber"] == async_block_with_txn_with_log["number"]
        assert receipt["blockHash"] == async_block_with_txn_with_log["hash"]
        assert receipt["transactionIndex"] == 0
        assert receipt["transactionHash"] == HexBytes(txn_hash_with_log)

        assert len(receipt["logs"]) == 1
        log_entry = receipt["logs"][0]

        assert log_entry["blockNumber"] == async_block_with_txn_with_log["number"]
        assert log_entry["blockHash"] == async_block_with_txn_with_log["hash"]
        assert log_entry["logIndex"] == 0
        assert is_same_address(log_entry["address"], async_emitter_contract.address)
        assert log_entry["transactionIndex"] == 0
        assert log_entry["transactionHash"] == HexBytes(txn_hash_with_log)
    @pytest.mark.asyncio
    async def test_async_eth_wait_for_transaction_receipt_mined(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn: BlockData,
        mined_txn_hash: HexStr,
    ) -> None:
        """wait_for_transaction_receipt on an already-mined txn returns a
        complete receipt immediately."""
        receipt = await async_w3.eth.wait_for_transaction_receipt(mined_txn_hash)
        assert is_dict(receipt)
        assert receipt["blockNumber"] == async_block_with_txn["number"]
        assert receipt["blockHash"] == async_block_with_txn["hash"]
        assert receipt["transactionIndex"] == 0
        assert receipt["transactionHash"] == HexBytes(mined_txn_hash)
        assert is_checksum_address(receipt["to"])
        assert receipt["from"] is not None
        assert is_checksum_address(receipt["from"])

        effective_gas_price = receipt["effectiveGasPrice"]
        assert isinstance(effective_gas_price, int)
        assert effective_gas_price > 0
@pytest.mark.asyncio
# TODO: Remove xfail when issue has been identified
@pytest.mark.xfail(
reason="latest geth seems to cause this to be flaky", strict=False
)
async def test_async_eth_wait_for_transaction_receipt_unmined(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_hash = await async_w3.eth.send_transaction(
{
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(3, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
}
)
timeout = 0.01
with pytest.raises(TimeExhausted) as exc_info:
await async_w3.eth.wait_for_transaction_receipt(txn_hash, timeout=timeout)
assert (_ in str(exc_info) for _ in [repr(txn_hash), timeout])
    @pytest.mark.asyncio
    async def test_async_eth_wait_for_transaction_receipt_with_log_entry(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn_with_log: BlockData,
        async_emitter_contract: "AsyncContract",
        txn_hash_with_log: HexStr,
    ) -> None:
        """wait_for_transaction_receipt returns a receipt whose single log
        entry is consistent with the containing block and emitter contract."""
        receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash_with_log)
        assert is_dict(receipt)
        assert receipt["blockNumber"] == async_block_with_txn_with_log["number"]
        assert receipt["blockHash"] == async_block_with_txn_with_log["hash"]
        assert receipt["transactionIndex"] == 0
        assert receipt["transactionHash"] == HexBytes(txn_hash_with_log)

        assert len(receipt["logs"]) == 1
        log_entry = receipt["logs"][0]

        assert log_entry["blockNumber"] == async_block_with_txn_with_log["number"]
        assert log_entry["blockHash"] == async_block_with_txn_with_log["hash"]
        assert log_entry["logIndex"] == 0
        assert is_same_address(log_entry["address"], async_emitter_contract.address)
        assert log_entry["transactionIndex"] == 0
        assert log_entry["transactionHash"] == HexBytes(txn_hash_with_log)
    @pytest.mark.asyncio
    async def test_async_eth_accounts(self, async_w3: "AsyncWeb3[Any]") -> None:
        """eth_accounts returns a non-empty list of checksummed addresses."""
        accounts = await async_w3.eth.accounts
        assert is_list_like(accounts)
        assert len(accounts) != 0
        assert all(is_checksum_address(account) for account in accounts)
    @pytest.mark.asyncio
    async def test_async_eth_blob_base_fee(self, async_w3: "AsyncWeb3[Any]") -> None:
        """eth_blobBaseFee returns a non-negative integer."""
        blob_base_fee = await async_w3.eth.blob_base_fee
        assert is_integer(blob_base_fee)
        assert blob_base_fee >= 0
    @pytest.mark.asyncio
    async def test_async_eth_get_logs_without_logs(
        self, async_w3: "AsyncWeb3[Any]", async_block_with_txn_with_log: BlockData
    ) -> None:
        """get_logs returns an empty list for ranges/addresses with no logs,
        and rejects an inverted block range."""
        # Test with block range
        filter_params: FilterParams = {
            "fromBlock": BlockNumber(0),
            "toBlock": BlockNumber(async_block_with_txn_with_log["number"] - 1),
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert len(result) == 0

        # the range is wrong
        filter_params = {
            "fromBlock": async_block_with_txn_with_log["number"],
            "toBlock": BlockNumber(async_block_with_txn_with_log["number"] - 1),
        }
        with pytest.raises(Web3RPCError):
            result = await async_w3.eth.get_logs(filter_params)

        # Test with `address`

        # filter with other address
        filter_params = {
            "fromBlock": BlockNumber(0),
            "address": UNKNOWN_ADDRESS,
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert len(result) == 0

        # Test with multiple `address`

        # filter with other address
        filter_params = {
            "fromBlock": BlockNumber(0),
            "address": [UNKNOWN_ADDRESS, UNKNOWN_ADDRESS],
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert len(result) == 0
    @pytest.mark.asyncio
    async def test_async_eth_get_logs_with_logs(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn_with_log: BlockData,
        async_emitter_contract_address: ChecksumAddress,
        txn_hash_with_log: HexStr,
    ) -> None:
        """get_logs finds the emitted log by block range, open-ended range,
        and contract address filters."""
        # Test with block range

        # the range includes the block where the log resides in
        filter_params: FilterParams = {
            "fromBlock": async_block_with_txn_with_log["number"],
            "toBlock": async_block_with_txn_with_log["number"],
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert_contains_log(
            result,
            async_block_with_txn_with_log,
            async_emitter_contract_address,
            txn_hash_with_log,
        )

        # specify only `from_block`. by default `to_block` should be 'latest'
        filter_params = {
            "fromBlock": BlockNumber(0),
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert_contains_log(
            result,
            async_block_with_txn_with_log,
            async_emitter_contract_address,
            txn_hash_with_log,
        )

        # Test with `address`

        # filter with emitter_contract.address
        filter_params = {
            "fromBlock": BlockNumber(0),
            "address": async_emitter_contract_address,
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert_contains_log(
            result,
            async_block_with_txn_with_log,
            async_emitter_contract_address,
            txn_hash_with_log,
        )
    @pytest.mark.asyncio
    async def test_async_eth_get_logs_with_logs_topic_args(
        self,
        async_w3: "AsyncWeb3[Any]",
        async_block_with_txn_with_log: BlockData,
        async_emitter_contract_address: ChecksumAddress,
        txn_hash_with_log: HexStr,
    ) -> None:
        """Topic filters with None wildcards in either position still match
        the emitted log."""
        # Test with None event sig

        filter_params: FilterParams = {
            "fromBlock": BlockNumber(0),
            "topics": [
                None,
                HexStr(
                    "0x000000000000000000000000000000000000000000000000000000000000d431"
                ),
            ],
        }

        result = await async_w3.eth.get_logs(filter_params)
        assert_contains_log(
            result,
            async_block_with_txn_with_log,
            async_emitter_contract_address,
            txn_hash_with_log,
        )

        # Test with None indexed arg
        filter_params = {
            "fromBlock": BlockNumber(0),
            "topics": [
                HexStr(
                    "0x057bc32826fbe161da1c110afcdcae7c109a8b69149f727fc37a603c60ef94ca"
                ),
                None,
            ],
        }
        result = await async_w3.eth.get_logs(filter_params)
        assert_contains_log(
            result,
            async_block_with_txn_with_log,
            async_emitter_contract_address,
            txn_hash_with_log,
        )
@pytest.mark.asyncio
async def test_async_eth_get_logs_with_logs_none_topic_args(
self, async_w3: "AsyncWeb3[Any]"
) -> None:
# Test with None overflowing
filter_params: FilterParams = {
"fromBlock": BlockNumber(0),
"topics": [None, None, None, None],
}
result = await async_w3.eth.get_logs(filter_params)
assert len(result) == 0
@pytest.mark.asyncio
async def test_async_eth_syncing(self, async_w3: "AsyncWeb3[Any]") -> None:
syncing = await async_w3.eth.syncing
assert is_boolean(syncing) or is_dict(syncing)
if is_boolean(syncing):
assert syncing is False
elif is_dict(syncing):
sync_dict = cast(SyncStatus, syncing)
assert "startingBlock" in sync_dict
assert "currentBlock" in sync_dict
assert "highestBlock" in sync_dict
assert is_integer(sync_dict["startingBlock"])
assert is_integer(sync_dict["currentBlock"])
assert is_integer(sync_dict["highestBlock"])
@pytest.mark.asyncio
async def test_async_eth_get_storage_at(
self, async_w3: "AsyncWeb3[Any]", async_storage_contract: "AsyncContract"
) -> None:
async_storage_contract_address = async_storage_contract.address
slot_0 = await async_w3.eth.get_storage_at(async_storage_contract_address, 0)
assert slot_0 == HexBytes(f"0x{'00' * 32}")
slot_1 = await async_w3.eth.get_storage_at(async_storage_contract_address, 1)
assert slot_1 == HexBytes(f"0x{'00' * 31}01")
slot_2 = await async_w3.eth.get_storage_at(async_storage_contract_address, 2)
assert slot_2 == HexBytes(f"0x{'00' * 31}02")
slot_3 = await async_w3.eth.get_storage_at(async_storage_contract_address, 3)
assert slot_3 == HexBytes(
"0x746872656500000000000000000000000000000000000000000000000000000a"
)
assert bytes(slot_3[:5]) == b"three"
slot_4 = await async_w3.eth.get_storage_at(async_storage_contract_address, 4)
assert slot_4 == HexBytes(
"0x666f757200000000000000000000000000000000000000000000000000000008"
)
assert bytes(slot_4[:4]) == b"four"
@pytest.mark.asyncio
async def test_async_eth_get_storage_at_ens_name(
self, async_w3: "AsyncWeb3[Any]", async_storage_contract: "AsyncContract"
) -> None:
with ens_addresses(async_w3, {"storage.eth": async_storage_contract.address}):
storage = await async_w3.eth.get_storage_at(ENS("storage.eth"), 1)
assert storage == HexBytes(f"0x{'00' * 31}01")
@pytest.mark.asyncio
async def test_async_eth_get_storage_at_invalid_address(
self, async_w3: "AsyncWeb3[Any]"
) -> None:
accounts = await async_w3.eth.accounts
with pytest.raises(InvalidAddress):
await async_w3.eth.get_storage_at(
ChecksumAddress(HexAddress(HexStr(accounts[0].lower()))), 0
)
def test_async_provider_default_account(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
current_default_account = async_w3.eth.default_account
# check setter
async_w3.eth.default_account = async_keyfile_account_address_dual_type
default_account = async_w3.eth.default_account
assert default_account == async_keyfile_account_address_dual_type
# reset to default
async_w3.eth.default_account = current_default_account
def test_async_provider_default_block(
self,
async_w3: "AsyncWeb3[Any]",
) -> None:
# check defaults to 'latest'
default_block = async_w3.eth.default_block
assert default_block == "latest"
# check setter
async_w3.eth.default_block = BlockNumber(12345)
default_block = async_w3.eth.default_block
assert default_block == BlockNumber(12345)
# reset to default
async_w3.eth.default_block = "latest"
@pytest.mark.asyncio
async def test_eth_getBlockTransactionCountByHash_async_empty_block(
self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
) -> None:
transaction_count = await async_w3.eth.get_block_transaction_count(
async_empty_block["hash"]
)
assert is_integer(transaction_count)
assert transaction_count == 0
@pytest.mark.asyncio
async def test_eth_getBlockTransactionCountByNumber_async_empty_block(
self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
) -> None:
transaction_count = await async_w3.eth.get_block_transaction_count(
async_empty_block["number"]
)
assert is_integer(transaction_count)
assert transaction_count == 0
@pytest.mark.asyncio
async def test_eth_getBlockTransactionCountByHash_block_with_txn(
self, async_w3: "AsyncWeb3[Any]", async_block_with_txn: BlockData
) -> None:
transaction_count = await async_w3.eth.get_block_transaction_count(
async_block_with_txn["hash"]
)
assert is_integer(transaction_count)
assert transaction_count >= 1
@pytest.mark.asyncio
async def test_eth_getUncleCountByBlockHash(
self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
) -> None:
with pytest.warns(
DeprecationWarning,
match=r"get_uncle_count is deprecated: all get_uncle\* "
r"methods will be removed in v8",
):
uncle_count = await async_w3.eth.get_uncle_count(async_empty_block["hash"])
assert is_integer(uncle_count)
assert uncle_count == 0
@pytest.mark.asyncio
async def test_eth_getUncleCountByBlockNumber(
self, async_w3: "AsyncWeb3[Any]", async_empty_block: BlockData
) -> None:
with pytest.warns(
DeprecationWarning,
match=r"get_uncle_count is deprecated: all get_uncle\* "
r"methods will be removed in v8",
):
uncle_count = await async_w3.eth.get_uncle_count(
async_empty_block["number"]
)
assert is_integer(uncle_count)
assert uncle_count == 0
@pytest.mark.asyncio
async def test_eth_getBlockTransactionCountByNumber_block_with_txn(
self, async_w3: "AsyncWeb3[Any]", async_block_with_txn: BlockData
) -> None:
transaction_count = await async_w3.eth.get_block_transaction_count(
async_block_with_txn["number"]
)
assert is_integer(transaction_count)
assert transaction_count >= 1
@pytest.mark.asyncio
async def test_async_eth_sign(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
signature = await async_w3.eth.sign(
async_keyfile_account_address_dual_type,
text="Message tö sign. Longer than hash!",
)
assert is_bytes(signature)
assert len(signature) == 32 + 32 + 1
# test other formats
hexsign = await async_w3.eth.sign(
async_keyfile_account_address_dual_type,
hexstr=HexStr(
"0x4d6573736167652074c3b6207369676e2e204c6f6e676572207468616e206861736821" # noqa: E501
),
)
assert hexsign == signature
intsign = await async_w3.eth.sign(
async_keyfile_account_address_dual_type,
0x4D6573736167652074C3B6207369676E2E204C6F6E676572207468616E206861736821,
)
assert intsign == signature
bytessign = await async_w3.eth.sign(
async_keyfile_account_address_dual_type,
b"Message t\xc3\xb6 sign. Longer than hash!",
)
assert bytessign == signature
new_signature = await async_w3.eth.sign(
async_keyfile_account_address_dual_type,
text="different message is different",
)
assert new_signature != signature
@pytest.mark.asyncio
async def test_async_eth_sign_ens_names(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
with ens_addresses(
async_w3, {"unlocked-acct.eth": async_keyfile_account_address_dual_type}
):
signature = await async_w3.eth.sign(
ENS("unlocked-acct.eth"), text="Message tö sign. Longer than hash!"
)
assert is_bytes(signature)
assert len(signature) == 32 + 32 + 1
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_legacy(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"gasPrice": async_w3.to_wei(1, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn_params["gasPrice"] = async_w3.to_wei(2, "gwei")
replace_txn_hash = await async_w3.eth.replace_transaction(txn_hash, txn_params)
replace_txn = await async_w3.eth.get_transaction(replace_txn_hash)
assert is_same_address(
replace_txn["from"], cast(ChecksumAddress, txn_params["from"])
)
assert is_same_address(
replace_txn["to"], cast(ChecksumAddress, txn_params["to"])
)
assert replace_txn["value"] == 1
assert replace_txn["gas"] == 21000
assert replace_txn["gasPrice"] == txn_params["gasPrice"]
@pytest.mark.asyncio
async def test_async_eth_replace_transaction(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
two_gwei_in_wei = async_w3.to_wei(2, "gwei")
three_gwei_in_wei = async_w3.to_wei(3, "gwei")
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": two_gwei_in_wei,
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn_params["maxFeePerGas"] = three_gwei_in_wei
txn_params["maxPriorityFeePerGas"] = two_gwei_in_wei
replace_txn_hash = await async_w3.eth.replace_transaction(txn_hash, txn_params)
replace_txn = await async_w3.eth.get_transaction(replace_txn_hash)
assert is_same_address(
replace_txn["from"], cast(ChecksumAddress, txn_params["from"])
)
assert is_same_address(
replace_txn["to"], cast(ChecksumAddress, txn_params["to"])
)
assert replace_txn["value"] == 1
assert replace_txn["gas"] == 21000
assert replace_txn["maxFeePerGas"] == three_gwei_in_wei
assert replace_txn["maxPriorityFeePerGas"] == two_gwei_in_wei
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_underpriced(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
# Note: `underpriced transaction` error is only consistent with
# ``txpool.nolocals`` flag as of Geth ``v1.15.4``.
# https://github.com/ethereum/web3.py/pull/3636
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(3, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(2, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
one_gwei_in_wei = async_w3.to_wei(1, "gwei")
txn_params["maxFeePerGas"] = one_gwei_in_wei
txn_params["maxPriorityFeePerGas"] = one_gwei_in_wei
with pytest.raises(Web3RPCError, match="replacement transaction underpriced"):
await async_w3.eth.replace_transaction(txn_hash, txn_params)
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_non_existing_transaction(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(3, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
}
with pytest.raises(TransactionNotFound):
await async_w3.eth.replace_transaction(
HexStr(
"0x98e8cc09b311583c5079fa600f6c2a3bea8611af168c52e4b60b5b243a441997"
),
txn_params,
)
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_already_mined(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(2, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
await async_w3.eth.wait_for_transaction_receipt(txn_hash, timeout=10)
txn_params["maxFeePerGas"] = async_w3.to_wei(3, "gwei")
txn_params["maxPriorityFeePerGas"] = async_w3.to_wei(2, "gwei")
with pytest.raises(Web3ValueError, match="Supplied transaction with hash"):
await async_w3.eth.replace_transaction(txn_hash, txn_params)
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_incorrect_nonce(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address,
"to": async_keyfile_account_address,
"value": Wei(1),
"gas": 21000,
"maxFeePerGas": async_w3.to_wei(2, "gwei"),
"maxPriorityFeePerGas": async_w3.to_wei(1, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn = await async_w3.eth.get_transaction(txn_hash)
txn_params["maxFeePerGas"] = async_w3.to_wei(3, "gwei")
txn_params["maxPriorityFeePerGas"] = async_w3.to_wei(2, "gwei")
txn_params["nonce"] = Nonce(txn["nonce"] + 1)
with pytest.raises(Web3ValueError):
await async_w3.eth.replace_transaction(txn_hash, txn_params)
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_gas_price_too_low(
self,
async_w3: "AsyncWeb3[Any]",
async_keyfile_account_address_dual_type: ChecksumAddress,
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address_dual_type,
"to": async_keyfile_account_address_dual_type,
"value": Wei(1),
"gas": 21000,
"gasPrice": async_w3.to_wei(2, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn_params["gasPrice"] = async_w3.to_wei(1, "gwei")
with pytest.raises(Web3ValueError):
await async_w3.eth.replace_transaction(txn_hash, txn_params)
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_gas_price_defaulting_minimum(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
gas_price = async_w3.to_wei(1, "gwei")
txn_params: TxParams = {
"from": async_keyfile_account_address,
"to": async_keyfile_account_address,
"value": Wei(1),
"gas": 21000,
"gasPrice": gas_price,
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
txn_params.pop("gasPrice")
replace_txn_hash = await async_w3.eth.replace_transaction(txn_hash, txn_params)
replace_txn = await async_w3.eth.get_transaction(replace_txn_hash)
assert replace_txn["gasPrice"] == math.ceil(
gas_price * 1.125
) # minimum gas price
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_gas_price_defaulting_strategy_higher(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
txn_params: TxParams = {
"from": async_keyfile_account_address,
"to": async_keyfile_account_address,
"value": Wei(1),
"gas": 21000,
"gasPrice": async_w3.to_wei(1, "gwei"),
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
two_gwei_in_wei = async_w3.to_wei(2, "gwei")
def higher_gas_price_strategy(
_async_w3: "AsyncWeb3[Any]", _txn: TxParams
) -> Wei:
return two_gwei_in_wei
async_w3.eth.set_gas_price_strategy(higher_gas_price_strategy)
txn_params.pop("gasPrice")
replace_txn_hash = await async_w3.eth.replace_transaction(txn_hash, txn_params)
replace_txn = await async_w3.eth.get_transaction(replace_txn_hash)
assert (
replace_txn["gasPrice"] == two_gwei_in_wei
) # Strategy provides higher gas price
async_w3.eth.set_gas_price_strategy(None) # reset strategy
@pytest.mark.asyncio
async def test_async_eth_replace_transaction_gas_price_defaulting_strategy_lower(
self, async_w3: "AsyncWeb3[Any]", async_keyfile_account_address: ChecksumAddress
) -> None:
gas_price = async_w3.to_wei(2, "gwei")
txn_params: TxParams = {
"from": async_keyfile_account_address,
"to": async_keyfile_account_address,
"value": Wei(1),
"gas": 21000,
"gasPrice": gas_price,
}
txn_hash = await async_w3.eth.send_transaction(txn_params)
def lower_gas_price_strategy(async_w3: "AsyncWeb3[Any]", txn: TxParams) -> Wei:
return async_w3.to_wei(1, "gwei")
async_w3.eth.set_gas_price_strategy(lower_gas_price_strategy)
txn_params.pop("gasPrice")
replace_txn_hash = await async_w3.eth.replace_transaction(txn_hash, txn_params)
replace_txn = await async_w3.eth.get_transaction(replace_txn_hash)
# Strategy provides lower gas price - minimum preferred
assert replace_txn["gasPrice"] == math.ceil(gas_price * 1.125)
async_w3.eth.set_gas_price_strategy(None) # reset strategy
@pytest.mark.asyncio
async def test_async_eth_new_filter(self, async_w3: "AsyncWeb3[Any]") -> None:
filter = await async_w3.eth.filter({})
changes = await async_w3.eth.get_filter_changes(filter.filter_id)
assert is_list_like(changes)
assert not changes
logs = await async_w3.eth.get_filter_logs(filter.filter_id)
assert is_list_like(logs)
assert not logs
result = await async_w3.eth.uninstall_filter(filter.filter_id)
assert result is True
@pytest.mark.asyncio
async def test_async_eth_new_block_filter(self, async_w3: "AsyncWeb3[Any]") -> None:
filter = await async_w3.eth.filter("latest")
assert is_string(filter.filter_id)
changes = await async_w3.eth.get_filter_changes(filter.filter_id)
assert is_list_like(changes)
result = await async_w3.eth.uninstall_filter(filter.filter_id)
assert result is True
@pytest.mark.asyncio
async def test_async_eth_new_pending_transaction_filter(
self, async_w3: "AsyncWeb3[Any]"
) -> None:
filter = await async_w3.eth.filter("pending")
assert is_string(filter.filter_id)
changes = await async_w3.eth.get_filter_changes(filter.filter_id)
assert is_list_like(changes)
assert not changes
result = await async_w3.eth.uninstall_filter(filter.filter_id)
assert result is True
@pytest.mark.asyncio
async def test_async_eth_uninstall_filter(self, async_w3: "AsyncWeb3[Any]") -> None:
filter = await async_w3.eth.filter({})
assert is_string(filter.filter_id)
success = await async_w3.eth.uninstall_filter(filter.filter_id)
assert success is True
failure = await async_w3.eth.uninstall_filter(filter.filter_id)
assert failure is False
| AsyncEthModuleTest |
python | bottlepy__bottle | bottle.py | {
"start": 7466,
"end": 7809
class ____(Exception):
    """ Root of the exception hierarchy used throughout bottle. """
###############################################################################
# Routing ######################################################################
###############################################################################
| BottleException |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/auto-table.py | {
"start": 806,
"end": 1214
class ____(DataTable):
    """A read-only demo table: three fixed columns, fifty identical rows."""

    def __init__(self):
        super().__init__()
        # Row-based cursor, but keep it hidden from the user.
        self.cursor_type = "row"
        self.show_cursor = False
        for heading in ("Foo", "Bar", "Baz"):
            self.add_column(heading)
        row = ("ABCDEFGH", "0123456789", "IJKLMNOPQRSTUVWXYZ")
        for _ in range(50):
            self.add_row(*row)
| StatusTable |
python | getsentry__sentry | tests/sentry/sentry_apps/external_issues/test_issue_link_creator.py | {
"start": 429,
"end": 2690
class ____(TestCase):
    """Tests for IssueLinkCreator, which calls a Sentry App's issue-link
    endpoint and records the result as a PlatformExternalIssue."""
    def setUp(self) -> None:
        super().setUp()
        # Minimal org/project/group plus an installed Sentry App to link against.
        self.user = self.create_user(name="foo")
        self.org = self.create_organization(owner=self.user)
        self.project = self.create_project(slug="boop", organization=self.org)
        self.group = self.create_group(project=self.project)
        self.sentry_app = self.create_sentry_app(
            name="foo", organization=self.org, webhook_url="https://example.com", scopes=()
        )
        self.orm_install = self.create_sentry_app_installation(
            slug="foo", organization=self.org, user=self.user
        )
        # RPC-level view of the installation, as consumed by IssueLinkCreator.
        self.install = app_service.get_many(filter=dict(installation_ids=[self.orm_install.id]))[0]
    @responses.activate
    def test_creates_external_issue(self) -> None:
        fields = {"title": "An Issue", "description": "a bug was found", "assignee": "user-1"}
        # Stub the Sentry App's issue-link endpoint response.
        responses.add(
            method=responses.POST,
            url="https://example.com/link-issue",
            json={
                "project": "Projectname",
                "webUrl": "https://example.com/project/issue-id",
                "identifier": "issue-1",
            },
            status=200,
            content_type="application/json",
        )
        result = IssueLinkCreator(
            install=self.install,
            group=self.group,
            action="create",
            uri="/link-issue",
            fields=fields,
            user=serialize_rpc_user(self.user),
        ).run()
        # A PlatformExternalIssue row is created from the endpoint's response,
        # with display_name composed as "<project>#<identifier>".
        external_issue = PlatformExternalIssue.objects.all()[0]
        assert result == external_issue
        assert external_issue.group_id == self.group.id
        assert external_issue.project_id == self.group.project.id
        assert external_issue.web_url == "https://example.com/project/issue-id"
        assert external_issue.display_name == "Projectname#issue-1"
    def test_invalid_action(self) -> None:
        # Only known actions are accepted; an unknown action raises.
        with pytest.raises(SentryAppSentryError):
            IssueLinkCreator(
                install=self.install,
                group=self.group,
                action="doop",
                uri="/link-issue",
                fields={},
                user=serialize_rpc_user(self.user),
            ).run()
| TestIssueLinkCreator |
python | getsentry__sentry | tests/sentry/sudo/test_utils.py | {
"start": 251,
"end": 1356
class ____(BaseTestCase):
    """Tests for grant_sudo_privileges(): issuing a sudo token on a request."""
    def assertRequestHasToken(self, request, max_age):
        # The token lives in the session under COOKIE_NAME and is mirrored
        # onto private request attributes used by the sudo machinery.
        token = request.session[COOKIE_NAME]
        self.assertRegex(token, r"^\w{12}$")
        self.assertTrue(request._sudo)
        self.assertEqual(request._sudo_token, token)
        self.assertEqual(request._sudo_max_age, max_age)
    def test_grant_token_not_logged_in(self) -> None:
        # An unauthenticated request cannot be granted sudo.
        with pytest.raises(ValueError):
            grant_sudo_privileges(self.request)
    def test_grant_token_default_max_age(self) -> None:
        self.login()
        token = grant_sudo_privileges(self.request)
        self.assertIsNotNone(token)
        # No explicit max_age -> falls back to COOKIE_AGE.
        self.assertRequestHasToken(self.request, COOKIE_AGE)
    def test_grant_token_explicit_max_age(self) -> None:
        self.login()
        token = grant_sudo_privileges(self.request, 60)
        self.assertIsNotNone(token)
        self.assertRequestHasToken(self.request, 60)
    def test_without_user(self) -> None:
        # A request lacking a ``user`` attribute entirely yields no token.
        delattr(self.request, "user")
        token = grant_sudo_privileges(self.request)
        self.assertIsNone(token)
| GrantSudoPrivilegesTestCase |
python | django__django | tests/generic_views/test_base.py | {
"start": 24444,
"end": 25152
class ____(SimpleTestCase):
    def test_template_mixin_without_template(self):
        """
        Using the template mixin without configuring any template must raise
        ImproperlyConfigured (not TemplateDoesNotExist), so the developer is
        pointed at the missing configuration rather than a lookup failure.
        """
        expected = (
            "SingleObjectTemplateResponseMixin requires a definition "
            "of 'template_name', 'template_name_field', or 'model'; "
            "or an implementation of 'get_template_names()'."
        )
        misconfigured_view = views.TemplateResponseWithoutTemplate()
        with self.assertRaisesMessage(ImproperlyConfigured, expected):
            misconfigured_view.get_template_names()
| SingleObjectTemplateResponseMixinTest |
python | pytorch__pytorch | test/test_mps.py | {
"start": 370577,
"end": 371983
class ____(TestCaseMPS):
    """Parity checks for F.smooth_l1_loss (forward and backward) between CPU
    tensors and their MPS copies, across all reduction modes."""
    @parametrize("reduction", ["none", "mean", "sum"])
    @parametrize("requires_grad", [False, True])
    def test_smooth_l1_loss(self, reduction, requires_grad):
        def helper(sizes):
            # CPU
            input_cpu = torch.randn(*sizes, requires_grad=requires_grad)
            target_cpu = torch.randn(*sizes)
            # MPS
            input_mps = input_cpu.detach().clone().to('mps').requires_grad_()
            target_mps = target_cpu.detach().clone().to('mps')
            smooth_l1_loss_cpu = F.smooth_l1_loss(input_cpu, target_cpu, beta=1.0, reduction=reduction)
            smooth_l1_loss_mps = F.smooth_l1_loss(input_mps, target_mps, beta=1.0, reduction=reduction)
            self.assertEqual(smooth_l1_loss_cpu, smooth_l1_loss_mps)
            if requires_grad:
                if reduction == "none":
                    # A non-scalar loss needs an explicit upstream gradient.
                    grad_cpu = torch.zeros_like(smooth_l1_loss_cpu)
                    grad_mps = grad_cpu.to('mps')
                    smooth_l1_loss_cpu.backward(grad_cpu)
                    smooth_l1_loss_mps.backward(grad_mps)
                else:
                    smooth_l1_loss_cpu.backward()
                    smooth_l1_loss_mps.backward()
                # Gradients computed on MPS must match the CPU reference.
                self.assertEqual(input_cpu.grad, input_mps.grad.to("cpu"))
        helper((2, 3, 4))
        helper((8, 5))
        helper((3, ))
        # Zero-size dimension: exercises the empty-tensor edge case.
        helper((3, 3, 0))
| TestSmoothL1Loss |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/libelf/package.py | {
"start": 217,
"end": 692
class ____(Package):
    """Mock libelf package; its install step only creates a marker file."""
    homepage = "http://www.mr511.de/software/english.html"
    url = "http://www.mr511.de/software/libelf-0.8.13.tar.gz"
    version("0.8.13", md5="4136d7b4c04df68b686570afa26988ac")
    version("0.8.12", md5="e21f8273d9f5f6d43a59878dc274fec7")
    version("0.8.10", md5="9db4d36c283d9790d8fa7df1f4d7b4d9")
    # Only the oldest version needs the local patch.
    patch("local.patch", when="@0.8.10")
    depends_on("c", type="build")
    def install(self, spec, prefix):
        # Fake "install": drop a marker file into the prefix.
        touch(prefix.libelf)
| Libelf |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/powerbi.py | {
"start": 13753,
"end": 16371
class ____(BasePowerBITrigger):
    """
    Triggers a call to the API to request the available dataset IDs.

    :param conn_id: The connection Id to connect to PowerBI.
    :param group_id: The group Id to list discoverable datasets.
    :param dataset_ids: Optional list of dataset Ids. NOTE(review): this value
        is stored and round-tripped through ``serialize()`` but is never used
        to filter the fetched list in ``run()`` -- confirm the intended use.
    :param timeout: The HTTP timeout being used by the `KiotaRequestAdapter`. Default is 1 week (60s * 60m * 24h * 7d).
        When no timeout is specified or set to None then there is no HTTP timeout on each request.
    :param proxies: A dict defining the HTTP proxies to be used (default is None).
    :param api_version: The API version of the Microsoft Graph API to be used (default is v1).
        You can pass an enum named APIVersion which has 2 possible members v1 and beta,
        or you can pass a string as `v1.0` or `beta`.
    """
    def __init__(
        self,
        conn_id: str,
        group_id: str,
        dataset_ids: list[str] | None = None,
        timeout: float = 60 * 60 * 24 * 7,
        proxies: dict | None = None,
        api_version: APIVersion | str | None = None,
    ):
        super().__init__(conn_id=conn_id, timeout=timeout, proxies=proxies, api_version=api_version)
        self.group_id = group_id
        self.dataset_ids = dataset_ids
    def serialize(self):
        """Serialize the trigger instance (classpath plus constructor kwargs)."""
        return (
            f"{self.__class__.__module__}.{self.__class__.__name__}",
            {
                "conn_id": self.conn_id,
                "proxies": self.proxies,
                "api_version": self.api_version,
                "timeout": self.timeout,
                "group_id": self.group_id,
                "dataset_ids": self.dataset_ids,
            },
        )
    async def run(self) -> AsyncIterator[TriggerEvent]:
        """Make async connection to the PowerBI and polls for the list of dataset IDs."""
        # Trigger the API to get the dataset list
        dataset_ids = await self.hook.get_dataset_list(
            group_id=self.group_id,
        )
        if dataset_ids:
            self.log.info("Triggered request to get dataset list.")
            yield TriggerEvent(
                {
                    "status": "success",
                    "message": f"The dataset list get request from workspace {self.group_id} has been successful.",
                    "dataset_ids": dataset_ids,
                }
            )
            return
        # An empty/None result is surfaced as an error event for the caller.
        yield TriggerEvent(
            {
                "status": "error",
                "message": "Error grabbing the dataset list.",
                "dataset_ids": None,
            }
        )
        return
| PowerBIDatasetListTrigger |
python | neetcode-gh__leetcode | python/0069-sqrtx.py | {
"start": 0,
"end": 305
class ____:
    def mySqrt(self, x: int) -> int:
        """Return the integer square root of x, i.e. floor(sqrt(x))."""
        lo, hi = 0, x
        best = 0
        # Binary search for the largest value whose square does not exceed x.
        while lo <= hi:
            mid = (lo + hi) // 2
            if mid * mid <= x:
                # mid is a valid answer; try to find a larger one.
                best = mid
                lo = mid + 1
            else:
                hi = mid - 1
        return best
| Solution |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 11365,
"end": 11701
class ____(GestureTool):
    ''' A base class for tools that perform "inspections", e.g. ``HoverTool``.
    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
    # ``toggleable`` is kept as a deprecated alias of ``visible`` (deprecated
    # since 3.4.0) so existing user code keeps working.
    toggleable = DeprecatedAlias("visible", since=(3, 4, 0))
| InspectTool |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 13711,
"end": 15027
class ____(BaseLinksSerializer, serializers.Serializer):
    """Serializer with all the user-facing URLs under Read the Docs."""

    documentation = serializers.SerializerMethodField()
    home = serializers.SerializerMethodField()
    builds = serializers.SerializerMethodField()
    versions = serializers.SerializerMethodField()
    downloads = serializers.SerializerMethodField()

    def get_home(self, obj):
        return self._absolute_url(
            reverse("projects_detail", kwargs={"project_slug": obj.slug})
        )

    def get_builds(self, obj):
        return self._absolute_url(
            reverse("builds_project_list", kwargs={"project_slug": obj.slug})
        )

    def get_versions(self, obj):
        return self._absolute_url(
            reverse("project_version_list", kwargs={"project_slug": obj.slug})
        )

    def get_downloads(self, obj):
        # Deprecated field: the page it pointed to was removed in ext-theme,
        # but the key is kept (always None) so API consumers don't break.
        return None

    def get_documentation(self, obj):
        parent = self.parent
        version = getattr(parent, "version", None)
        resolver = getattr(parent, "resolver", Resolver())
        return resolver.resolve_version(project=obj, version=version)
| ProjectURLsSerializer |
python | ray-project__ray | python/ray/tests/test_runtime_env_packaging.py | {
"start": 8296,
"end": 9272
class ____:
    """Tests for upload_package_if_needed(): a package is uploaded to the
    internal KV store only when its URI is not already present there."""
    def test_create_upload_once(self, tmp_path, random_dir, ray_start_regular):
        uri = get_uri_for_directory(random_dir, include_gitignore=True)
        # First call: nothing cached yet, so the package must be uploaded.
        uploaded = upload_package_if_needed(
            uri, tmp_path, random_dir, include_gitignore=True
        )
        assert uploaded
        assert _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
        # Second call with the same URI: already present, so no re-upload.
        uploaded = upload_package_if_needed(
            uri, tmp_path, random_dir, include_gitignore=True
        )
        assert not uploaded
        assert _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
        # Delete the URI from the internal_kv. This should trigger re-upload.
        _internal_kv_del(uri, namespace=KV_NAMESPACE_PACKAGE)
        assert not _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
        uploaded = upload_package_if_needed(
            uri, tmp_path, random_dir, include_gitignore=True
        )
        assert uploaded
| TestUploadPackageIfNeeded |
python | streamlit__streamlit | lib/streamlit/runtime/media_file_manager.py | {
"start": 2489,
"end": 15173
} | class ____:
"""In-memory file manager for MediaFile objects.
This keeps track of:
- Which files exist, and what their IDs are. This is important so we can
serve files by ID -- that's the whole point of this class!
- Which files are being used by which AppSession (by ID). This is
important so we can remove files from memory when no more sessions need
them.
- The exact location in the app where each file is being used (i.e. the
file's "coordinates"). This is is important so we can mark a file as "not
being used by a certain session" if it gets replaced by another file at
the same coordinates. For example, when doing an animation where the same
image is constantly replace with new frames. (This doesn't solve the case
where the file's coordinates keep changing for some reason, though! e.g.
if new elements keep being prepended to the app. Unlikely to happen, but
we should address it at some point.)
"""
def __init__(self, storage: MediaFileStorage) -> None:
self._storage = storage
# Dict of [file_id -> MediaFileMetadata]
self._file_metadata: dict[str, MediaFileMetadata] = {}
# Dict[session ID][coordinates] -> file_id.
self._files_by_session_and_coord: dict[str, dict[str, str]] = (
collections.defaultdict(dict)
)
# Dict of [file_id -> deferred callable metadata]
# Used for deferred download button execution
self._deferred_callables: dict[str, DeferredCallableEntry] = {}
# MediaFileManager is used from multiple threads, so all operations
# need to be protected with a Lock. (This is not an RLock, which
# means taking it multiple times from the same thread will deadlock.)
self._lock = threading.Lock()
def _get_inactive_file_ids(self) -> set[str]:
"""Compute the set of files that are stored in the manager, but are
not referenced by any active session. These are files that can be
safely deleted.
Thread safety: callers must hold `self._lock`.
"""
# Get the set of all our file IDs.
file_ids = set(self._file_metadata.keys())
# Subtract all IDs that are in use by each session
for session_file_ids_by_coord in self._files_by_session_and_coord.values():
file_ids.difference_update(session_file_ids_by_coord.values())
return file_ids
def remove_orphaned_files(self) -> None:
"""Remove all files that are no longer referenced by any active session.
Safe to call from any thread.
"""
_LOGGER.debug("Removing orphaned files...")
with self._lock:
for file_id in self._get_inactive_file_ids():
file = self._file_metadata[file_id]
if file.kind == MediaFileKind.MEDIA:
self._delete_file(file_id)
elif file.kind == MediaFileKind.DOWNLOADABLE:
if file.is_marked_for_delete:
self._delete_file(file_id)
else:
file.mark_for_delete()
# Clean up orphaned deferred callables
self._remove_orphaned_deferred_callables()
def _remove_orphaned_deferred_callables(self) -> None:
"""Remove deferred callables that are not referenced by any active session.
Thread safety: callers must hold `self._lock`.
"""
_LOGGER.debug("Removing orphaned deferred callables...")
# Get all file_ids currently referenced by any session
active_file_ids = set[str]()
for session_file_ids_by_coord in self._files_by_session_and_coord.values():
active_file_ids.update(session_file_ids_by_coord.values())
# Remove deferred callables that are no longer referenced
deferred_ids_to_remove = [
file_id
for file_id in self._deferred_callables
if file_id not in active_file_ids
]
for file_id in deferred_ids_to_remove:
_LOGGER.debug("Removing deferred callable: %s", file_id)
del self._deferred_callables[file_id]
def _delete_file(self, file_id: str) -> None:
"""Delete the given file from storage, and remove its metadata from
self._files_by_id.
Thread safety: callers must hold `self._lock`.
"""
_LOGGER.debug("Deleting File: %s", file_id)
self._storage.delete_file(file_id)
del self._file_metadata[file_id]
def clear_session_refs(self, session_id: str | None = None) -> None:
"""Remove the given session's file references.
(This does not remove any files from the manager - you must call
`remove_orphaned_files` for that.)
Should be called whenever ScriptRunner completes and when a session ends.
Safe to call from any thread.
"""
if session_id is None:
session_id = _get_session_id()
_LOGGER.debug("Disconnecting files for session with ID %s", session_id)
with self._lock:
if session_id in self._files_by_session_and_coord:
del self._files_by_session_and_coord[session_id]
# Don't immediately delete deferred callables here to avoid race conditions.
# They will be cleaned up by remove_orphaned_deferred_callables() which
# only removes callables that are not referenced by ANY session.
_LOGGER.debug(
"Sessions still active: %r", self._files_by_session_and_coord.keys()
)
_LOGGER.debug(
"Files: %s; Sessions with files: %s",
len(self._file_metadata),
len(self._files_by_session_and_coord),
)
def add(
self,
path_or_data: bytes | str,
mimetype: str,
coordinates: str,
file_name: str | None = None,
is_for_static_download: bool = False,
) -> str:
"""Add a new MediaFile with the given parameters and return its URL.
If an identical file already exists, return the existing URL
and registers the current session as a user.
Safe to call from any thread.
Parameters
----------
path_or_data : bytes or str
If bytes: the media file's raw data. If str: the name of a file
to load from disk.
mimetype : str
The mime type for the file. E.g. "audio/mpeg".
This string will be used in the "Content-Type" header when the file
is served over HTTP.
coordinates : str
Unique string identifying an element's location.
Prevents memory leak of "forgotten" file IDs when element media
is being replaced-in-place (e.g. an st.image stream).
coordinates should be of the form: "1.(3.-14).5"
file_name : str or None
Optional file_name. Used to set the filename in the response header.
is_for_static_download: bool
Indicate that data stored for downloading as a file,
not as a media for rendering at page. [default: False]
Returns
-------
str
The url that the frontend can use to fetch the media.
Raises
------
If a filename is passed, any Exception raised when trying to read the
file will be re-raised.
"""
session_id = _get_session_id()
with self._lock:
kind = (
MediaFileKind.DOWNLOADABLE
if is_for_static_download
else MediaFileKind.MEDIA
)
file_id = self._storage.load_and_get_id(
path_or_data, mimetype, kind, file_name
)
metadata = MediaFileMetadata(kind=kind)
self._file_metadata[file_id] = metadata
self._files_by_session_and_coord[session_id][coordinates] = file_id
return self._storage.get_url(file_id)
def add_deferred(
self,
data_callable: Callable[[], bytes | str | BinaryIO | TextIO | io.RawIOBase],
mimetype: str | None,
coordinates: str,
file_name: str | None = None,
) -> str:
"""Register a callable for deferred execution. Returns placeholder file_id.
The callable will be executed later when execute_deferred() is called,
typically when the user clicks a download button.
Safe to call from any thread.
Parameters
----------
data_callable : Callable[[], bytes | str | BinaryIO | TextIO | io.RawIOBase]
A callable that returns the file data when invoked.
mimetype : str or None
The mime type for the file. E.g. "text/csv".
If None, the mimetype will be inferred from the data type when
execute_deferred() is called.
coordinates : str
Unique string identifying an element's location.
file_name : str or None
Optional file_name. Used to set the filename in the response header.
Returns
-------
str
A placeholder file_id that can be used to execute the callable later.
"""
session_id = _get_session_id()
with self._lock:
# Generate a unique placeholder ID for this deferred callable
# Expected: a new placeholder ID is created on every script rerun.
file_id = uuid.uuid4().hex
# Store the callable with its metadata
self._deferred_callables[file_id] = cast(
"DeferredCallableEntry",
{
"callable": data_callable,
"mimetype": mimetype,
"filename": file_name,
"coordinates": coordinates,
},
)
# Track this deferred file by session and coordinate
self._files_by_session_and_coord[session_id][coordinates] = file_id
return file_id
    def execute_deferred(self, file_id: str) -> str:
        """Execute a deferred callable and return the URL to the generated file.

        This method retrieves the callable registered with add_deferred(),
        executes it, stores the result, and returns a URL to access it.
        Each call re-executes the callable, so repeat clicks always get
        freshly generated data.

        Safe to call from any thread.

        Parameters
        ----------
        file_id : str
            The placeholder file_id returned by add_deferred().

        Returns
        -------
        str
            The URL that can be used to download the generated file.

        Raises
        ------
        MediaFileStorageError
            If the file_id is not found or if the callable execution fails.
        """
        # Retrieve deferred callable metadata while holding lock
        with self._lock:
            if file_id not in self._deferred_callables:
                raise MediaFileStorageError(f"Deferred file {file_id} not found")
            deferred = self._deferred_callables[file_id]

        # Execute callable outside lock to avoid blocking other operations
        # (user-supplied code may be arbitrarily slow).
        try:
            data = deferred["callable"]()
        except Exception as e:
            raise MediaFileStorageError(f"Callable execution failed: {e}") from e

        # Convert data to bytes and infer mimetype if needed
        data_as_bytes, inferred_mime_type = convert_data_to_bytes_and_infer_mime(
            data,
            unsupported_error=MediaFileStorageError(
                f"Callable returned unsupported type: {type(data)}"
            ),
        )

        # Use provided mimetype if available, otherwise use inferred mimetype
        mime_type: str = deferred["mimetype"] or inferred_mime_type

        # Store the generated data and get the actual file_id
        # (re-acquire the lock: state may have changed while we ran the
        # callable above).
        with self._lock:
            actual_file_id = self._storage.load_and_get_id(
                data_as_bytes,
                mime_type,
                MediaFileKind.DOWNLOADABLE,
                deferred["filename"],
            )

            # Create metadata for the actual file
            metadata = MediaFileMetadata(kind=MediaFileKind.DOWNLOADABLE)
            self._file_metadata[actual_file_id] = metadata

        # Keep the deferred callable so users can download multiple times
        # It will be cleaned up when clear_session_refs() is called on rerun
        # We leave actual_file_id unmapped so repeat clicks rerun the callable.
        # Cleanup prunes the stored file once no session references it.

        # Return the URL to access the file
        return self._storage.get_url(actual_file_id)
| MediaFileManager |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/middleware/anthropic_tools.py | {
"start": 2487,
"end": 4772
} | class ____(AgentState):
"""State schema for Anthropic text editor and memory tools."""
text_editor_files: NotRequired[Annotated[dict[str, FileData], files_reducer]]
"""Virtual file system for text editor tools."""
memory_files: NotRequired[Annotated[dict[str, FileData], files_reducer]]
"""Virtual file system for memory tools."""
def _validate_path(path: str, *, allowed_prefixes: Sequence[str] | None = None) -> str:
"""Validate and normalize file path for security.
Args:
path: The path to validate.
allowed_prefixes: Optional list of allowed path prefixes.
Returns:
Normalized canonical path.
Raises:
ValueError: If path contains traversal sequences or violates prefix rules.
"""
# Reject paths with traversal attempts
if ".." in path or path.startswith("~"):
msg = f"Path traversal not allowed: {path}"
raise ValueError(msg)
# Normalize path (resolve ., //, etc.)
normalized = os.path.normpath(path)
# Convert to forward slashes for consistency
normalized = normalized.replace("\\", "/")
# Ensure path starts with /
if not normalized.startswith("/"):
normalized = f"/{normalized}"
# Check allowed prefixes if specified
if allowed_prefixes is not None and not any(
normalized.startswith(prefix) for prefix in allowed_prefixes
):
msg = f"Path must start with one of {allowed_prefixes}: {path}"
raise ValueError(msg)
return normalized
def _list_directory(files: dict[str, FileData], path: str) -> list[str]:
"""List files in a directory.
Args:
files: Files `dict`.
path: Normalized directory path.
Returns:
Sorted list of file paths in the directory.
"""
# Ensure path ends with / for directory matching
dir_path = path if path.endswith("/") else f"{path}/"
matching_files = []
for file_path in files:
if file_path.startswith(dir_path):
# Get relative path from directory
relative = file_path[len(dir_path) :]
# Only include direct children (no subdirectories)
if "/" not in relative:
matching_files.append(file_path)
return sorted(matching_files)
| AnthropicToolsState |
python | pyinstaller__pyinstaller | PyInstaller/archive/readers.py | {
"start": 719,
"end": 1256
class ____(TypeError):
    """Raised when the given file does not contain a recognizable archive."""
    pass


# Type codes for CArchive TOC entries
PKG_ITEM_BINARY = 'b'  # binary
# NOTE(review): original trailing comment said "runtime option", which
# duplicates 'o' below; 'd' presumably marks a dependency entry — confirm.
PKG_ITEM_DEPENDENCY = 'd'  # runtime option
PKG_ITEM_PYZ = 'z'  # zlib (pyz) - frozen Python code
PKG_ITEM_ZIPFILE = 'Z'  # zlib (pyz) - frozen Python code
PKG_ITEM_PYPACKAGE = 'M'  # Python package (__init__.py)
PKG_ITEM_PYMODULE = 'm'  # Python module
PKG_ITEM_PYSOURCE = 's'  # Python script (v3)
PKG_ITEM_DATA = 'x'  # data
PKG_ITEM_RUNTIME_OPTION = 'o'  # runtime option
PKG_ITEM_SPLASH = 'l'  # splash resources
python | kamyu104__LeetCode-Solutions | Python/count-special-subsequences.py | {
"start": 67,
"end": 628
class ____(object):
    def numberOfSubsequences(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Counts index quadruples (p, q, r, s) with q >= p+2, r >= q+2,
        s >= r+2 and nums[p] * nums[r] == nums[q] * nums[s], checked via
        the equivalent ratio equality nums[p]/nums[q] == nums[s]/nums[r].
        """
        # cnt[ratio] = number of (p, q) pairs seen so far whose
        # nums[p]/nums[q] equals `ratio`.
        # NOTE(review): float division can lose precision for large values;
        # exact arithmetic (e.g. Fraction) would be safer — confirm input
        # constraints make float keys collision-free.
        cnt = collections.defaultdict(int)
        result = 0
        # Python 2 code (xrange). For each r: first grow the (p, q) pool
        # with q fixed at r-2, then tally matching (r, s) pairs.
        for r in xrange(4, len(nums)-2):
            q = r-2
            for p in xrange((q-2)+1):
                cnt[float(nums[p])/nums[q]] += 1
            for s in xrange(r+2, len(nums)):
                result += cnt[float(nums[s])/nums[r]]
        return result
# Time: O(n^2 * logr)
# Space: O(n^2)
import collections
# freq table, number theory
| Solution |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 30869,
"end": 31081
class ____(PrefectBaseModel):
    """Filter by `Artifact.task_run_id`."""

    # A list of task run IDs to include; presumably the default of None
    # disables this filter entirely — confirm against the query builder.
    any_: Optional[List[UUID]] = Field(
        default=None, description="A list of task run IDs to include"
    )
| ArtifactFilterTaskRunId |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py | {
"start": 2078,
"end": 7974
} | class ____(RpcAgentTestFixture):
"""
Run tests for rpc_async in JIT under the faulty agent test fixture to test
arbitrary timeouts.
"""
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_torchscript_function(self):
# Call rpc_async + fut.wait() in torchscript function and ensure that
# timeout is raised.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
# Ensure that we get a timeout if we override the default timeout and
# the RPC takes longer to execute.
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)
# Ensure that we timeout if we don't specify a timeout but the default
# is less than the RPC takes to execute.
rpc._set_rpc_timeout(0.001)
with self.assertRaisesRegex(RuntimeError, expected_error):
script_rpc_async_call(dst_worker_name, args, kwargs)
# Ensure that we run to completion if zero timeout is specified.
ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0)
self.assertEqual(ret, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_python(self):
# Ensures timeouts are raised if we call rpc_async from within a
# torchscript function, but wait on the future in python.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure timeout if we don't specify but the default is less than the
# RPC takes to execute.
rpc._set_rpc_timeout(0.001)
fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if zero timeout is specified
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0)
result = fut.wait()
self.assertEqual(result, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_remote_timeout_to_here_in_jit(self):
# Test that calling to_here() in JIT will raise timeout error if
# rpc.remote failed.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call to_here() within a ScriptFunction and ensure it raises
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref_to_here(rref)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_to_here_with_timeout(rref, 0.01)
rref_to_here_with_timeout(rref, 100)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with RRef arg in JIT, which will go through JIT pickling and
# ensure error is raised.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc_async_with_rref_arg(dst_worker, (rref,))
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_script_func(self):
# Similar to above test, but calls python rpc with script function.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with script function that takes RRef, ensure timeout during pickling
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_sync(dst_worker, rref_to_here, args=(rref,))
| JitFaultyAgentRpcTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/data_version_cache.py | {
"start": 1918,
"end": 10140
} | class ____:
def __init__(self, context: "StepExecutionContext"):
self._context = context
self.input_asset_version_info: dict[AssetKey, Optional[InputAssetVersionInfo]] = {}
self.is_external_input_asset_version_info_loaded = False
self.values: dict[AssetKey, DataVersion] = {}
def set_data_version(self, asset_key: AssetKey, data_version: "DataVersion") -> None:
self.values[asset_key] = data_version
def has_data_version(self, asset_key: AssetKey) -> bool:
return asset_key in self.values
def get_data_version(self, asset_key: AssetKey) -> "DataVersion":
return self.values[asset_key]
def maybe_fetch_and_get_input_asset_version_info(
self, key: AssetKey
) -> Optional["InputAssetVersionInfo"]:
if key not in self.input_asset_version_info:
self._fetch_input_asset_version_info([key])
return self.input_asset_version_info[key]
# "external" refers to records for inputs generated outside of this step
def fetch_external_input_asset_version_info(self) -> None:
output_keys = self._context.get_output_asset_keys()
all_dep_keys: list[AssetKey] = []
for output_key in output_keys:
if not self._context.job_def.asset_layer.has(output_key):
continue
dep_keys = self._context.job_def.asset_layer.get(output_key).parent_keys
for key in dep_keys:
if key not in all_dep_keys and key not in output_keys:
all_dep_keys.append(key)
self.input_asset_version_info = {}
self._fetch_input_asset_version_info(all_dep_keys)
self.is_external_input_asset_version_info_loaded = True
def _fetch_input_asset_version_info(self, asset_keys: Sequence[AssetKey]) -> None:
from dagster._core.definitions.data_version import extract_data_version_from_entry
asset_records_by_key = self._fetch_asset_records(asset_keys)
for key in asset_keys:
asset_record = asset_records_by_key.get(key)
event = self._get_input_asset_event(key, asset_record)
if event is None:
self.input_asset_version_info[key] = None
else:
storage_id = event.storage_id
# Input name will be none if this is an internal dep
input_name = self._context.job_def.asset_layer.get_node_input_name(
self._context.node_handle, key
)
# Exclude AllPartitionMapping for now to avoid huge queries
if input_name and self._context.has_asset_partitions_for_input(input_name):
subset = self._context.asset_partitions_subset_for_input(
input_name, require_valid_partitions=False
)
input_keys = list(subset.get_partition_keys())
# This check represents a temporary constraint that prevents huge query results for upstream
# partition data versions from timing out runs. If a partitioned dependency (a) uses an
# AllPartitionMapping; and (b) has greater than or equal to
# SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD dependency partitions, then we
# process it as a non-partitioned dependency (note that this was the behavior for
# all partition dependencies prior to 2023-08). This means that stale status
# results cannot be accurately computed for the dependency, and there is thus
# corresponding logic in the CachingStaleStatusResolver to account for this. This
# constraint should be removed when we have thoroughly examined the performance of
# the data version retrieval query and can guarantee decent performance.
if len(input_keys) < SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD:
data_version = self._get_partitions_data_version_from_keys(key, input_keys)
else:
data_version = extract_data_version_from_entry(event.event_log_entry)
else:
data_version = extract_data_version_from_entry(event.event_log_entry)
self.input_asset_version_info[key] = InputAssetVersionInfo(
storage_id,
check.not_none(event.event_log_entry.dagster_event).event_type,
data_version,
event.run_id,
event.timestamp,
)
def _fetch_asset_records(self, asset_keys: Sequence[AssetKey]) -> dict[AssetKey, "AssetRecord"]:
batch_size = int(os.getenv("GET_ASSET_RECORDS_FOR_DATA_VERSION_BATCH_SIZE", "100"))
asset_records_by_key = {}
to_fetch = asset_keys
while len(to_fetch):
for record in self._context.instance.get_asset_records(to_fetch[:batch_size]):
asset_records_by_key[record.asset_entry.asset_key] = record
to_fetch = to_fetch[batch_size:]
return asset_records_by_key
def _get_input_asset_event(
self, key: AssetKey, asset_record: Optional["AssetRecord"]
) -> Optional["EventLogRecord"]:
event = None
if asset_record and asset_record.asset_entry.last_materialization_record:
event = asset_record.asset_entry.last_materialization_record
elif (
asset_record
and self._context.instance.event_log_storage.asset_records_have_last_observation
):
event = asset_record.asset_entry.last_observation_record
if (
not event
and not self._context.instance.event_log_storage.asset_records_have_last_observation
):
event = next(
iter(self._context.instance.fetch_observations(key, limit=1).records), None
)
if event:
self._check_input_asset_event(key, event)
return event
def _check_input_asset_event(self, key: AssetKey, event: "EventLogRecord") -> None:
assert event.event_log_entry
event_data_version = extract_data_version_from_entry(event.event_log_entry)
if key in self.values and self.values[key] != event_data_version:
self._context.log.warning(
f"Data version mismatch for asset {key}. Data version from materialization within"
f" current step is `{self.values[key]}`. Data version from most recent"
f" materialization is `{event_data_version}`. Most recent materialization will be"
" used for provenance tracking."
)
def _get_partitions_data_version_from_keys(
self, key: AssetKey, partition_keys: Sequence[str]
) -> "DataVersion":
from dagster._core.definitions.data_version import DataVersion
from dagster._core.events import DagsterEventType
# TODO: this needs to account for observations also
event_type = DagsterEventType.ASSET_MATERIALIZATION
tags_by_partition = self._context.instance._event_storage.get_latest_tags_by_partition( # noqa: SLF001
key, event_type, [DATA_VERSION_TAG], asset_partitions=list(partition_keys)
)
partition_data_versions = [
pair[1][DATA_VERSION_TAG]
for pair in sorted(tags_by_partition.items(), key=lambda x: x[0])
]
hash_sig = sha256()
hash_sig.update(bytearray("".join(partition_data_versions), "utf8"))
return DataVersion(hash_sig.hexdigest())
# Call this to clear the cache for an input asset record. This is necessary when an old
# materialization for an asset was loaded during `fetch_external_input_asset_records` because an
# intrastep asset is not required, but then that asset is materialized during the step. If we
# don't clear the cache for this asset, then we won't use the most up-to-date asset record.
def wipe_input_asset_version_info(self, key: AssetKey) -> None:
if key in self.input_asset_version_info:
del self.input_asset_version_info[key]
| DataVersionCache |
python | kubernetes-client__python | kubernetes/client/models/v1_flex_volume_source.py | {
"start": 383,
"end": 7390
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'driver': 'str',
'fs_type': 'str',
'options': 'dict(str, str)',
'read_only': 'bool',
'secret_ref': 'V1LocalObjectReference'
}
attribute_map = {
'driver': 'driver',
'fs_type': 'fsType',
'options': 'options',
'read_only': 'readOnly',
'secret_ref': 'secretRef'
}
def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None, local_vars_configuration=None): # noqa: E501
"""V1FlexVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._driver = None
self._fs_type = None
self._options = None
self._read_only = None
self._secret_ref = None
self.discriminator = None
self.driver = driver
if fs_type is not None:
self.fs_type = fs_type
if options is not None:
self.options = options
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
@property
def driver(self):
"""Gets the driver of this V1FlexVolumeSource. # noqa: E501
driver is the name of the driver to use for this volume. # noqa: E501
:return: The driver of this V1FlexVolumeSource. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1FlexVolumeSource.
driver is the name of the driver to use for this volume. # noqa: E501
:param driver: The driver of this V1FlexVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def fs_type(self):
"""Gets the fs_type of this V1FlexVolumeSource. # noqa: E501
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. # noqa: E501
:return: The fs_type of this V1FlexVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1FlexVolumeSource.
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. # noqa: E501
:param fs_type: The fs_type of this V1FlexVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def options(self):
"""Gets the options of this V1FlexVolumeSource. # noqa: E501
options is Optional: this field holds extra command options if any. # noqa: E501
:return: The options of this V1FlexVolumeSource. # noqa: E501
:rtype: dict(str, str)
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this V1FlexVolumeSource.
options is Optional: this field holds extra command options if any. # noqa: E501
:param options: The options of this V1FlexVolumeSource. # noqa: E501
:type: dict(str, str)
"""
self._options = options
@property
def read_only(self):
"""Gets the read_only of this V1FlexVolumeSource. # noqa: E501
readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:return: The read_only of this V1FlexVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1FlexVolumeSource.
readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:param read_only: The read_only of this V1FlexVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""Gets the secret_ref of this V1FlexVolumeSource. # noqa: E501
:return: The secret_ref of this V1FlexVolumeSource. # noqa: E501
:rtype: V1LocalObjectReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1FlexVolumeSource.
:param secret_ref: The secret_ref of this V1FlexVolumeSource. # noqa: E501
:type: V1LocalObjectReference
"""
self._secret_ref = secret_ref
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1FlexVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1FlexVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1FlexVolumeSource |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol40.py | {
"start": 630,
"end": 776
class ____(Generic[S]):
    def f0(self, other: Self) -> "C2[S]":
        # `other` is typed `Self` and returned under the concrete
        # parameterized type C2[S].
        return other


# Conformance checks: C2[str] must be assignable to both protocol types
# (presumably P2Parent/P2Child are protocols defined earlier in the file).
a2: P2Parent[str] = C2[str]()
b2: P2Child[str] = C2[str]()
python | etianen__django-reversion | tests/test_app/tests/test_api.py | {
"start": 2066,
"end": 2248
class ____(TestModelMixin, TestBase):
    def testUnregister(self):
        """Unregistering a model removes it from reversion's registry."""
        reversion.unregister(TestModel)
        self.assertFalse(reversion.is_registered(TestModel))
| UnregisterTest |
python | doocs__leetcode | solution/3700-3799/3707.Equal Score Substrings/Solution.py | {
"start": 0,
"end": 297
} | class ____:
def scoreBalance(self, s: str) -> bool:
l = 0
r = sum(ord(c) - ord("a") + 1 for c in s)
for c in s[:-1]:
x = ord(c) - ord("a") + 1
l += x
r -= x
if l == r:
return True
return False
| Solution |
python | facebook__pyre-check | tools/incremental_test/specification.py | {
"start": 9542,
"end": 9969
class ____(SingleUpdate):
    # Target changeset hash to check out.
    commit_hash: str

    def update(self, environment: Environment, working_directory: Path) -> None:
        """Check out `commit_hash` with `hg update --clean`, discarding
        any uncommitted changes in the working directory."""
        environment.checked_run(
            working_directory=working_directory,
            command=f"hg update --clean {self.commit_hash}",
        )

    def to_json(self) -> Dict[str, Any]:
        """Serialize this update as a JSON-compatible dict."""
        return {"kind": "hg", "commit_hash": self.commit_hash}
@dataclass(frozen=True)
| HgRepositoryUpdate |
python | bokeh__bokeh | src/bokeh/core/property/descriptors.py | {
"start": 7265,
"end": 25234
} | class ____(Generic[T]):
""" A base class for Bokeh properties with simple get/set and serialization
behavior.
"""
name: str
#property: Property[T]
__doc__: str | None
def __init__(self, name: str, property: Property[T]) -> None:
""" Create a PropertyDescriptor for basic Bokeh properties.
Args:
name (str) : The attribute name that this property is for
property (Property) : A basic property to create a descriptor for
"""
self.name = name
self.property = property
self.__doc__ = self.property.__doc__
def __str__(self) -> str:
""" Basic string representation of ``PropertyDescriptor``.
Delegates to ``self.property.__str__``
"""
return f"{self.property}"
def __get__(self, obj: HasProps | None, owner: type[HasProps] | None) -> T:
""" Implement the getter for the Python `descriptor protocol`_.
For instance attribute access, we delegate to the |Property|. For
class attribute access, we return ourself.
Args:
obj (HasProps or None) :
The instance to set a new property value on (for instance
attribute access), or None (for class attribute access)
owner (obj) :
The new value to set the property to
Returns:
None
Examples:
.. code-block:: python
>>> from bokeh.models import Range1d
>>> r = Range1d(start=10, end=20)
# instance attribute access, returns the property value
>>> r.start
10
# class attribute access, returns the property descriptor
>>> Range1d.start
<bokeh.core.property.descriptors.PropertyDescriptor at 0x1148b3390>
"""
if obj is not None:
value = self._get(obj)
if value is Undefined:
raise UnsetValueError(f"{obj}.{self.name} doesn't have a value set")
return value
elif owner is not None:
return self
# This should really never happen. If it does, __get__ was called in a bad way.
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def __set__(self, obj: HasProps, value: T, *, setter: Setter | None = None) -> None:
""" Implement the setter for the Python `descriptor protocol`_.
.. note::
An optional argument ``setter`` has been added to the standard
setter arguments. When needed, this value should be provided by
explicitly invoking ``__set__``. See below for more information.
Args:
obj (HasProps) :
The instance to set a new property value on
value (obj) :
The new value to set the property to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
"""
if not hasattr(obj, '_property_values'):
# Initial values should be passed in to __init__, not set directly
class_name = obj.__class__.__name__
raise RuntimeError(f"Cannot set a property value {self.name!r} on a {class_name} instance before HasProps.__init__")
if self.property.readonly and obj._initialized:
class_name = obj.__class__.__name__
raise RuntimeError(f"{class_name}.{self.name} is a readonly property")
value = self.property.prepare_value(obj, self.name, value)
old = self._get(obj)
self._set(obj, old, value, setter=setter)
def __delete__(self, obj: HasProps) -> None:
""" Implement the deleter for the Python `descriptor protocol`_.
Args:
obj (HasProps) : An instance to delete this property from
"""
if self.name in obj._property_values:
old_value = obj._property_values[self.name]
del obj._property_values[self.name]
self.trigger_if_changed(obj, old_value)
if self.name in obj._unstable_default_values:
del obj._unstable_default_values[self.name]
def class_default(self, cls: type[HasProps], *, no_eval: bool = False):
""" Get the default value for a specific subtype of ``HasProps``,
which may not be used for an individual instance.
Args:
cls (class) : The class to get the default value for.
no_eval (bool, optional) :
Whether to evaluate callables for defaults (default: False)
Returns:
object
"""
return self.property.themed_default(cls, self.name, None, no_eval=no_eval)
def instance_default(self, obj: HasProps) -> T:
""" Get the default value that will be used for a specific instance.
Args:
obj (HasProps) : The instance to get the default value for.
Returns:
object
"""
return self.property.themed_default(obj.__class__, self.name, obj.themed_values())
def get_value(self, obj: HasProps) -> Any:
""" Produce the value used for serialization.
Sometimes it is desirable for the serialized value to differ from
the ``__get__`` in order for the ``__get__`` value to appear simpler
for user or developer convenience.
Args:
obj (HasProps) : the object to get the serialized attribute for
Returns:
Any
"""
return self.__get__(obj, obj.__class__)
def set_from_json(self, obj: HasProps, value: Any, *, setter: Setter | None = None):
"""Sets the value of this property from a JSON value.
Args:
obj: (HasProps) : instance to set the property value on
value: (JSON-value) : value to set to the attribute to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
"""
value = self.property.prepare_value(obj, self.name, value)
old = self._get(obj)
self._set(obj, old, value, setter=setter)
def trigger_if_changed(self, obj: HasProps, old: Any) -> None:
""" Send a change event notification if the property is set to a
value is not equal to ``old``.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property to compare
Returns:
None
"""
new_value = self.__get__(obj, obj.__class__)
if not self.property.matches(old, new_value):
self._trigger(obj, old, new_value)
@property
def has_ref(self) -> bool:
""" Whether the property can refer to another ``HasProps`` instance.
For basic properties, delegate to the ``has_ref`` attribute on the
|Property|.
"""
return self.property.has_ref
@property
def readonly(self) -> bool:
""" Whether this property is read-only.
Read-only properties may only be modified by the client (i.e., by BokehJS
in the browser).
"""
return self.property.readonly
@property
def serialized(self) -> bool:
""" Whether the property should be serialized when serializing an
object.
This would be False for a "virtual" or "convenience" property that
duplicates information already available in other properties, for
example.
"""
return self.property.serialized
def has_unstable_default(self, obj: HasProps) -> bool:
# _may_have_unstable_default() doesn't have access to overrides, so check manually
return self.property._may_have_unstable_default() or \
self.is_unstable(obj.__overridden_defaults__.get(self.name, None))
@classmethod
def is_unstable(cls, value: Any) -> TypeGuard[Callable[[], Any]]:
from types import FunctionType
from .instance import InstanceDefault
return isinstance(value, (FunctionType, InstanceDefault))
def _get(self, obj: HasProps) -> T:
""" Internal implementation of instance attribute access for the
``PropertyDescriptor`` getter.
If the value has not been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
"""
if not hasattr(obj, '_property_values'):
class_name = obj.__class__.__name__
raise RuntimeError(f"Cannot get a property value {self.name!r} from a {class_name} instance before HasProps.__init__")
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name]
def _get_default(self, obj: HasProps) -> T:
""" Internal implementation of instance attribute access for default
values.
Handles bookkeeping around ``PropertyContainer`` value, etc.
"""
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
themed_values = obj.themed_values()
is_themed = themed_values is not None and self.name in themed_values
unstable_dict = obj._unstable_themed_values if is_themed else obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name]
# Ensure we do not look up the default until after we check if it already present
# in the unstable_dict because it is a very expensive operation
# Ref: https://github.com/bokeh/bokeh/pull/13174
default = self.instance_default(obj)
if self.has_unstable_default(obj):
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self)
unstable_dict[self.name] = default
return default
def _set_value(self, obj: HasProps, value: Any) -> None:
""" Actual descriptor value assignment. """
if isinstance(value, PropertyValueContainer):
value._register_owner(obj, self)
if self.name in obj._unstable_themed_values:
del obj._unstable_themed_values[self.name]
if self.name in obj._unstable_default_values:
del obj._unstable_default_values[self.name]
obj._property_values[self.name] = value
def _set(self, obj: HasProps, old: Any, value: Any, *,
hint: DocumentPatchedEvent | None = None, setter: Setter | None = None) -> None:
""" Internal implementation helper to set property values.
This function handles bookkeeping around noting whether values have
been explicitly set, etc.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property to compare
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
"""
if value is Undefined:
raise RuntimeError("internal error attempting to set Undefined value")
# Normally we want a "no-op" if the new value and old value are identical
# but some hinted events are in-place. This check will allow those cases
# to continue on to the notification machinery
if self.property.matches(value, old) and (hint is None):
return
was_set = self.name in obj._property_values
# "old" is the logical old value, but it may not be the actual current
# attribute value if our value was mutated behind our back and we got
# _notify_mutated.
old_attr_value = obj._property_values[self.name] if was_set else old
if old_attr_value is not value:
if isinstance(old_attr_value, PropertyValueContainer):
old_attr_value._unregister_owner(obj, self)
self._set_value(obj, value)
# for notification purposes, "old" should be the logical old
self._trigger(obj, old, value, hint=hint, setter=setter)
# called when a container is mutated "behind our back" and
# we detect it with our collection wrappers.
def _notify_mutated(self, obj: HasProps, old: Any, hint: DocumentPatchedEvent | None = None) -> None:
""" A method to call when a container is mutated "behind our back"
and we detect it with our ``PropertyContainer`` wrappers.
Args:
obj (HasProps) :
The object who's container value was mutated
old (object) :
The "old" value of the container
In this case, somewhat weirdly, ``old`` is a copy and the
new value should already be set unless we change it due to
validation.
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
Returns:
None
"""
value = self.__get__(obj, obj.__class__)
# re-validate because the contents of 'old' have changed,
# in some cases this could give us a new object for the value
value = self.property.prepare_value(obj, self.name, value, hint=hint)
self._set(obj, old, value, hint=hint)
def _trigger(self, obj: HasProps, old: Any, value: Any, *,
hint: DocumentPatchedEvent | None = None, setter: Setter | None = None) -> None:
""" Unconditionally send a change event notification for the property.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property
new (obj) :
The new value of the property
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
"""
if hasattr(obj, 'trigger'):
obj.trigger(self.name, old, value, hint, setter)
_CDS_SET_FROM_CDS_ERROR = """
ColumnDataSource.data properties may only be set from plain Python dicts,
not other ColumnDataSource.data values.
If you need to copy set from one CDS to another, make a shallow copy by
calling dict: s1.data = dict(s2.data)
"""
| PropertyDescriptor |
python | joke2k__faker | faker/providers/person/en_GB/__init__.py | {
"start": 81,
"end": 22879
} | class ____(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats = formats_female + formats_male
# Names from
# http://webarchive.nationalarchives.gov.uk/20160105160709/http://ons.gov.uk/ons/publications/re-reference-tables.html?edition=tcm%3A77-243767
first_names_male = (
"David",
"Paul",
"Christopher",
"Thomas",
"John",
"Mark",
"James",
"Stephen",
"Andrew",
"Jack",
"Michael",
"Daniel",
"Peter",
"Richard",
"Matthew",
"Robert",
"Ryan",
"Joshua",
"Alan",
"Ian",
"Simon",
"Luke",
"Samuel",
"Jordan",
"Anthony",
"Adam",
"Lee",
"Alexander",
"William",
"Kevin",
"Darren",
"Benjamin",
"Philip",
"Gary",
"Joseph",
"Brian",
"Steven",
"Liam",
"Keith",
"Martin",
"Jason",
"Jonathan",
"Jake",
"Graham",
"Nicholas",
"Craig",
"George",
"Colin",
"Neil",
"Lewis",
"Nigel",
"Oliver",
"Timothy",
"Stuart",
"Kenneth",
"Raymond",
"Jamie",
"Nathan",
"Geoffrey",
"Connor",
"Terence",
"Trevor",
"Adrian",
"Harry",
"Malcolm",
"Scott",
"Callum",
"Wayne",
"Aaron",
"Barry",
"Ashley",
"Bradley",
"Patrick",
"Gareth",
"Jacob",
"Sean",
"Kieran",
"Derek",
"Carl",
"Dean",
"Charles",
"Sam",
"Shaun",
"Ben",
"Roger",
"Mohammed",
"Leslie",
"Ronald",
"Kyle",
"Clive",
"Edward",
"Antony",
"Jeremy",
"Justin",
"Jeffrey",
"Christian",
"Roy",
"Karl",
"Alex",
"Gordon",
"Dominic",
"Joe",
"Marc",
"Reece",
"Dennis",
"Russell",
"Gavin",
"Rhys",
"Phillip",
"Allan",
"Robin",
"Charlie",
"Gerald",
"Ross",
"Francis",
"Eric",
"Julian",
"Bernard",
"Dale",
"Donald",
"Damian",
"Frank",
"Shane",
"Cameron",
"Norman",
"Duncan",
"Louis",
"Frederick",
"Tony",
"Howard",
"Conor",
"Douglas",
"Garry",
"Elliot",
"Marcus",
"Arthur",
"Vincent",
"Max",
"Mathew",
"Abdul",
"Henry",
"Martyn",
"Ricky",
"Leonard",
"Lawrence",
"Glen",
"Mitchell",
"Gerard",
"Gregory",
"Iain",
"Billy",
"Bryan",
"Joel",
"Clifford",
"Josh",
"Leon",
"Stewart",
"Mohammad",
"Dylan",
"Graeme",
"Terry",
"Guy",
"Elliott",
"Stanley",
"Danny",
"Brandon",
"Victor",
"Toby",
"Hugh",
"Mohamed",
"Brett",
"Albert",
"Tom",
"Declan",
"Maurice",
"Glenn",
"Leigh",
"Denis",
"Damien",
"Bruce",
"Jay",
"Owen",
)
first_names_female = (
"Susan",
"Sarah",
"Rebecca",
"Linda",
"Julie",
"Claire",
"Laura",
"Lauren",
"Christine",
"Karen",
"Nicola",
"Gemma",
"Jessica",
"Margaret",
"Jacqueline",
"Emma",
"Charlotte",
"Janet",
"Deborah",
"Lisa",
"Hannah",
"Patricia",
"Tracey",
"Joanne",
"Sophie",
"Carol",
"Jane",
"Michelle",
"Victoria",
"Amy",
"Elizabeth",
"Helen",
"Samantha",
"Emily",
"Mary",
"Diane",
"Rachel",
"Anne",
"Sharon",
"Ann",
"Tracy",
"Amanda",
"Jennifer",
"Chloe",
"Angela",
"Louise",
"Katie",
"Lucy",
"Barbara",
"Alison",
"Sandra",
"Caroline",
"Clare",
"Kelly",
"Bethany",
"Gillian",
"Natalie",
"Jade",
"Pauline",
"Megan",
"Elaine",
"Alice",
"Lesley",
"Catherine",
"Hayley",
"Pamela",
"Danielle",
"Holly",
"Wendy",
"Abigail",
"Valerie",
"Olivia",
"Jean",
"Dawn",
"Donna",
"Stephanie",
"Leanne",
"Kathleen",
"Natasha",
"Denise",
"Sally",
"Katherine",
"Georgia",
"Maureen",
"Maria",
"Zoe",
"Judith",
"Kerry",
"Debra",
"Melanie",
"Stacey",
"Eleanor",
"Paula",
"Shannon",
"Sheila",
"Joanna",
"Paige",
"Janice",
"Lorraine",
"Georgina",
"Lynn",
"Andrea",
"Suzanne",
"Nicole",
"Yvonne",
"Chelsea",
"Lynne",
"Anna",
"Kirsty",
"Shirley",
"Alexandra",
"Marion",
"Beverley",
"Melissa",
"Rosemary",
"Kimberley",
"Carole",
"Fiona",
"Kate",
"Joan",
"Marie",
"Jenna",
"Marilyn",
"Jodie",
"June",
"Grace",
"Mandy",
"Rachael",
"Lynda",
"Tina",
"Kathryn",
"Molly",
"Jayne",
"Amber",
"Marian",
"Jasmine",
"Brenda",
"Sara",
"Kayleigh",
"Teresa",
"Harriet",
"Julia",
"Ashleigh",
"Heather",
"Kim",
"Ruth",
"Jemma",
"Carly",
"Leah",
"Eileen",
"Francesca",
"Naomi",
"Hilary",
"Abbie",
"Sylvia",
"Katy",
"Irene",
"Cheryl",
"Rosie",
"Dorothy",
"Aimee",
"Vanessa",
"Ellie",
"Frances",
"Sian",
"Josephine",
"Gail",
"Jill",
"Lydia",
"Joyce",
"Charlene",
"Hollie",
"Hazel",
"Annette",
"Bethan",
"Amelia",
"Beth",
"Rita",
"Geraldine",
"Diana",
"Lindsey",
"Carolyn",
)
first_names = first_names_male + first_names_female
last_names = OrderedDict(
(
("Savage", 0.04),
("Winter", 0.03),
("Metcalfe", 0.03),
("Harper", 0.06),
("Burgess", 0.06),
("Bailey", 0.15),
("Potts", 0.03),
("Boyle", 0.03),
("Brown", 0.51),
("Jennings", 0.05),
("Payne", 0.09),
("Day", 0.09),
("Holland", 0.07),
("Higgins", 0.05),
("Rhodes", 0.04),
("Hancock", 0.04),
("Howells", 0.03),
("Fowler", 0.04),
("Sims", 0.03),
("Thomas", 0.35),
("Parker", 0.17),
("Bentley", 0.04),
("Barnett", 0.05),
("Manning", 0.03),
("Collier", 0.03),
("Holloway", 0.03),
("Hartley", 0.04),
("George", 0.05),
("Tomlinson", 0.04),
("Howard", 0.09),
("Long", 0.06),
("Farmer", 0.03),
("Collins", 0.15),
("Rice", 0.03),
("Townsend", 0.04),
("Rees", 0.07),
("Bruce", 0.03),
("Hammond", 0.05),
("Ford", 0.09),
("Tucker", 0.05),
("Wallis", 0.03),
("Hamilton", 0.06),
("Ferguson", 0.04),
("Hooper", 0.03),
("Francis", 0.07),
("Reeves", 0.04),
("Barlow", 0.04),
("Short", 0.04),
("Cunningham", 0.05),
("Hopkins", 0.06),
("Nicholson", 0.06),
("Archer", 0.04),
("Green", 0.25),
("Glover", 0.04),
("Gibson", 0.09),
("Spencer", 0.08),
("Warner", 0.04),
("Webb", 0.12),
("Whitehouse", 0.03),
("Dean", 0.06),
("Griffiths", 0.16),
("Clark", 0.2),
("Hardy", 0.05),
("Iqbal", 0.03),
("Baldwin", 0.04),
("O'Neill", 0.06),
("Blake", 0.05),
("Lees", 0.03),
("Harvey", 0.1),
("Clarke", 0.24),
("Daniels", 0.04),
("Browne", 0.03),
("Macdonald", 0.04),
("Kirk", 0.04),
("Khan", 0.14),
("Davidson", 0.05),
("Dale", 0.04),
("Sanders", 0.04),
("Wilkins", 0.04),
("Connor", 0.03),
("Daly", 0.03),
("Lane", 0.06),
("Kennedy", 0.06),
("Bray", 0.03),
("Burrows", 0.04),
("Hayes", 0.07),
("Wyatt", 0.03),
("Gould", 0.03),
("Dyer", 0.03),
("Nash", 0.05),
("Bryan", 0.03),
("Pope", 0.03),
("Fraser", 0.04),
("Steele", 0.03),
("Walsh", 0.09),
("Wade", 0.04),
("Marsden", 0.03),
("Humphries", 0.03),
("O'Brien", 0.08),
("Thompson", 0.28),
("Lord", 0.03),
("Coleman", 0.06),
("Jarvis", 0.04),
("Noble", 0.03),
("Williamson", 0.06),
("Carpenter", 0.03),
("Gardner", 0.06),
("Farrell", 0.04),
("Clayton", 0.05),
("Akhtar", 0.05),
("Gallagher", 0.05),
("Skinner", 0.04),
("Birch", 0.04),
("Kay", 0.04),
("Barrett", 0.07),
("Bates", 0.06),
("Lucas", 0.04),
("O'Connor", 0.06),
("Chamberlain", 0.03),
("Chapman", 0.12),
("Ryan", 0.08),
("Thorpe", 0.04),
("Lawson", 0.04),
("Howell", 0.04),
("Martin", 0.23),
("Kelly", 0.16),
("Dobson", 0.04),
("Stevens", 0.1),
("Brennan", 0.04),
("Lloyd", 0.11),
("Quinn", 0.05),
("Morton", 0.04),
("Wilson", 0.35),
("Barnes", 0.11),
("Henry", 0.03),
("Smith", 1.15),
("Pritchard", 0.05),
("Phillips", 0.18),
("Dixon", 0.1),
("Sharpe", 0.03),
("Robertson", 0.07),
("White", 0.27),
("Bird", 0.06),
("Abbott", 0.04),
("Kirby", 0.04),
("Hussain", 0.11),
("Barber", 0.05),
("Harris", 0.25),
("Doyle", 0.05),
("Jordan", 0.05),
("Burns", 0.06),
("Hodgson", 0.06),
("Atkins", 0.04),
("Stokes", 0.05),
("Rogers", 0.12),
("Parkes", 0.03),
("Brookes", 0.04),
("Herbert", 0.03),
("Gordon", 0.05),
("Kemp", 0.05),
("Webster", 0.07),
("Sinclair", 0.03),
("McLean", 0.03),
("Saunders", 0.09),
("Stephens", 0.05),
("Newton", 0.07),
("Potter", 0.05),
("Storey", 0.03),
("Stanley", 0.04),
("Turnbull", 0.03),
("Duncan", 0.03),
("Rose", 0.08),
("Mills", 0.11),
("Sheppard", 0.03),
("Butcher", 0.03),
("Fry", 0.03),
("Ross", 0.06),
("Shepherd", 0.06),
("Goodwin", 0.05),
("Holt", 0.05),
("Haynes", 0.04),
("Cook", 0.15),
("Ward", 0.21),
("Godfrey", 0.03),
("Stone", 0.07),
("Dodd", 0.04),
("Parsons", 0.07),
("Ingram", 0.03),
("Nixon", 0.03),
("Evans", 0.39),
("Hargreaves", 0.03),
("Owen", 0.11),
("Chan", 0.03),
("Connolly", 0.03),
("Charlton", 0.03),
("Middleton", 0.04),
("Hyde", 0.03),
("Patel", 0.24),
("Owens", 0.03),
("Lamb", 0.04),
("Palmer", 0.11),
("Cooper", 0.22),
("McCarthy", 0.06),
("Black", 0.04),
("Dickinson", 0.04),
("Gilbert", 0.05),
("Leach", 0.03),
("North", 0.03),
("Byrne", 0.06),
("Frost", 0.05),
("Simmons", 0.04),
("Matthews", 0.11),
("Alexander", 0.04),
("Ahmed", 0.1),
("Gibbons", 0.03),
("Stevenson", 0.05),
("Rowley", 0.03),
("Miles", 0.05),
("Hanson", 0.03),
("Bolton", 0.03),
("Craig", 0.03),
("Ali", 0.12),
("Carroll", 0.04),
("Allan", 0.03),
("Sanderson", 0.03),
("Fletcher", 0.1),
("Burton", 0.08),
("Oliver", 0.07),
("Davison", 0.04),
("Douglas", 0.04),
("Field", 0.04),
("Pickering", 0.03),
("Pugh", 0.04),
("Rowe", 0.05),
("Mahmood", 0.03),
("Sykes", 0.03),
("Crawford", 0.03),
("Williams", 0.66),
("Parkin", 0.03),
("Patterson", 0.04),
("Power", 0.03),
("Price", 0.17),
("Murphy", 0.14),
("Hale", 0.03),
("Nicholls", 0.06),
("Hall", 0.25),
("Jones", 0.94),
("Hughes", 0.26),
("Stephenson", 0.05),
("Morley", 0.04),
("Knight", 0.11),
("Kerr", 0.03),
("Heath", 0.04),
("Pollard", 0.03),
("Lowe", 0.07),
("O'Sullivan", 0.04),
("Buckley", 0.05),
("Bond", 0.05),
("Dennis", 0.03),
("Lewis", 0.25),
("Weston", 0.04),
("Joyce", 0.03),
("Reynolds", 0.09),
("Bishop", 0.06),
("Norris", 0.04),
("Barry", 0.03),
("Whittaker", 0.04),
("Carey", 0.03),
("Hill", 0.22),
("Kent", 0.04),
("Ashton", 0.04),
("Wilkinson", 0.13),
("Powell", 0.12),
("Henderson", 0.06),
("Freeman", 0.06),
("Dunn", 0.07),
("Kaur", 0.09),
("French", 0.04),
("Parry", 0.06),
("Walton", 0.06),
("Fisher", 0.1),
("Naylor", 0.03),
("Duffy", 0.04),
("Humphreys", 0.04),
("Randall", 0.03),
("Bevan", 0.03),
("Doherty", 0.03),
("Moore", 0.21),
("Armstrong", 0.07),
("Sullivan", 0.05),
("Swift", 0.03),
("Pearce", 0.09),
("Tyler", 0.03),
("Bradshaw", 0.04),
("Allen", 0.19),
("Mellor", 0.03),
("Whitehead", 0.05),
("Jackson", 0.24),
("Grant", 0.07),
("Fox", 0.09),
("Wright", 0.28),
("Anderson", 0.13),
("Foster", 0.13),
("Gibbs", 0.04),
("Butler", 0.11),
("Jenkins", 0.1),
("John", 0.04),
("Morrison", 0.04),
("Talbot", 0.03),
("Blackburn", 0.03),
("Osborne", 0.05),
("Flynn", 0.04),
("Richards", 0.14),
("Hurst", 0.03),
("Bibi", 0.05),
("Houghton", 0.03),
("Johnson", 0.34),
("Yates", 0.06),
("Mistry", 0.03),
("Donnelly", 0.03),
("Parkinson", 0.04),
("Thomson", 0.05),
("Woods", 0.07),
("Todd", 0.04),
("Dawson", 0.08),
("Hart", 0.07),
("Graham", 0.1),
("Berry", 0.07),
("Willis", 0.05),
("Miah", 0.04),
("Brooks", 0.09),
("Horton", 0.03),
("Riley", 0.07),
("Lambert", 0.05),
("Waters", 0.04),
("Lynch", 0.05),
("Moss", 0.06),
("Slater", 0.05),
("Knowles", 0.04),
("Benson", 0.03),
("Adams", 0.13),
("King", 0.2),
("Davies", 0.48),
("Richardson", 0.15),
("Vincent", 0.03),
("Holmes", 0.11),
("Conway", 0.03),
("Marshall", 0.14),
("Faulkner", 0.03),
("Garner", 0.03),
("Booth", 0.08),
("Harrison", 0.2),
("Campbell", 0.11),
("Cole", 0.08),
("Goddard", 0.04),
("Walters", 0.05),
("Ellis", 0.13),
("Edwards", 0.27),
("Peters", 0.04),
("Atkinson", 0.08),
("Wood", 0.24),
("Briggs", 0.04),
("Elliott", 0.09),
("Chandler", 0.03),
("Hope", 0.03),
("Hunter", 0.07),
("Newman", 0.07),
("Pratt", 0.03),
("Rahman", 0.03),
("Hicks", 0.04),
("Cox", 0.14),
("Reid", 0.07),
("Morris", 0.21),
("Banks", 0.04),
("Myers", 0.03),
("Mitchell", 0.16),
("Davey", 0.04),
("Peacock", 0.03),
("Reed", 0.07),
("Carter", 0.15),
("Miller", 0.14),
("Perkins", 0.04),
("Read", 0.05),
("Hilton", 0.03),
("Moran", 0.03),
("Welch", 0.03),
("Vaughan", 0.04),
("Clements", 0.03),
("Griffin", 0.05),
("Russell", 0.1),
("O'Donnell", 0.03),
("Hobbs", 0.03),
("Marsh", 0.07),
("Porter", 0.07),
("Gill", 0.08),
("Leonard", 0.03),
("McKenzie", 0.03),
("Thornton", 0.04),
("Fitzgerald", 0.03),
("Greenwood", 0.05),
("Pearson", 0.1),
("James", 0.19),
("Coles", 0.03),
("Roberts", 0.33),
("Nelson", 0.05),
("Forster", 0.03),
("Gough", 0.03),
("Mann", 0.05),
("Law", 0.03),
("Barker", 0.1),
("Cartwright", 0.04),
("Bradley", 0.08),
("Sharp", 0.05),
("Warren", 0.06),
("Summers", 0.03),
("Little", 0.04),
("Perry", 0.08),
("Fuller", 0.04),
("West", 0.09),
("Mason", 0.12),
("Finch", 0.03),
("Norton", 0.03),
("Burke", 0.05),
("Holden", 0.04),
("Lee", 0.2),
("Smart", 0.04),
("Bull", 0.04),
("Bryant", 0.04),
("Gray", 0.12),
("Watts", 0.08),
("Brady", 0.03),
("Baker", 0.2),
("Barton", 0.05),
("Davis", 0.17),
("Baxter", 0.05),
("Taylor", 0.53),
("Carr", 0.07),
("Wong", 0.04),
("Cameron", 0.03),
("Gardiner", 0.03),
("Hawkins", 0.07),
("Shaw", 0.15),
("Wallace", 0.05),
("Young", 0.16),
("Shah", 0.06),
("Gregory", 0.07),
("Ball", 0.08),
("Norman", 0.04),
("Lawrence", 0.09),
("Bowen", 0.04),
("Wheeler", 0.05),
("Bartlett", 0.04),
("Sutton", 0.06),
("Lyons", 0.03),
("Hutchinson", 0.05),
("Poole", 0.05),
("Cooke", 0.06),
("Franklin", 0.03),
("Howe", 0.04),
("Walker", 0.27),
("Johnston", 0.05),
("Austin", 0.05),
("Chadwick", 0.03),
("Bell", 0.15),
("Wall", 0.04),
("Woodward", 0.05),
("Preston", 0.04),
("Bennett", 0.16),
("Murray", 0.1),
("Begum", 0.13),
("McDonald", 0.06),
("Hudson", 0.07),
("Cross", 0.06),
("Singh", 0.13),
("Howarth", 0.03),
("Hewitt", 0.05),
("Curtis", 0.06),
("Harding", 0.07),
("May", 0.05),
("Wells", 0.07),
("Giles", 0.03),
("Watson", 0.17),
("Nolan", 0.03),
("Andrews", 0.09),
("Hayward", 0.04),
("Schofield", 0.04),
("Hunt", 0.12),
("Robson", 0.06),
("Arnold", 0.05),
("Morgan", 0.19),
("Coates", 0.03),
("Page", 0.07),
("Simpson", 0.13),
("Stewart", 0.09),
("Robinson", 0.29),
("Fleming", 0.03),
("Scott", 0.18),
("Chambers", 0.06),
("Turner", 0.23),
("Watkins", 0.06),
)
)
prefixes_female = ("Mrs", "Ms", "Miss", "Dr")
prefixes_male = ("Mr", "Dr")
| Provider |
python | bottlepy__bottle | test/test_wsgi.py | {
"start": 8534,
"end": 14820
} | class ____(ServerTestBase):
def test_decorators(self):
def foo(): return bottle.request.method
bottle.get('/')(foo)
bottle.post('/')(foo)
bottle.put('/')(foo)
bottle.delete('/')(foo)
for verb in 'GET POST PUT DELETE'.split():
self.assertBody(verb, '/', method=verb)
def test_single_path(self):
@bottle.route('/a')
def test(): return 'ok'
self.assertBody('ok', '/a')
self.assertStatus(404, '/b')
def test_path_list(self):
@bottle.route(['/a','/b'])
def test(): return 'ok'
self.assertBody('ok', '/a')
self.assertBody('ok', '/b')
self.assertStatus(404, '/c')
def test_no_path(self):
@bottle.route()
def test(x=5): return str(x)
self.assertBody('5', '/test')
self.assertBody('6', '/test/6')
def test_no_params_at_all(self):
@bottle.route
def test(x=5): return str(x)
self.assertBody('5', '/test')
self.assertBody('6', '/test/6')
def test_method(self):
@bottle.route(method='gEt')
def test(): return 'ok'
self.assertBody('ok', '/test', method='GET')
self.assertStatus(200, '/test', method='HEAD')
self.assertStatus(405, '/test', method='PUT')
def test_method_list(self):
@bottle.route(method=['GET','post'])
def test(): return 'ok'
self.assertBody('ok', '/test', method='GET')
self.assertBody('ok', '/test', method='POST')
self.assertStatus(405, '/test', method='PUT')
def test_apply(self):
def revdec(func):
def wrapper(*a, **ka):
return reversed(func(*a, **ka))
return wrapper
@bottle.route('/nodec')
@bottle.route('/dec', apply=revdec)
def test(): return '1', '2'
self.assertBody('21', '/dec')
self.assertBody('12', '/nodec')
def test_apply_list(self):
def revdec(func):
def wrapper(*a, **ka):
return reversed(func(*a, **ka))
return wrapper
def titledec(func):
def wrapper(*a, **ka):
return ''.join(func(*a, **ka)).title()
return wrapper
@bottle.route('/revtitle', apply=[revdec, titledec])
@bottle.route('/titlerev', apply=[titledec, revdec])
def test(): return 'a', 'b', 'c'
self.assertBody('cbA', '/revtitle')
self.assertBody('Cba', '/titlerev')
def test_hooks(self):
@bottle.route()
def test():
return bottle.request.environ.get('hooktest','nohooks')
@bottle.hook('before_request')
def hook():
bottle.request.environ['hooktest'] = 'before'
@bottle.hook('after_request')
def hook(*args, **kwargs):
bottle.response.headers['X-Hook'] = 'after'
self.assertBody('before', '/test')
self.assertHeader('X-Hook', 'after', '/test')
def test_after_request_sees_HTTPError_response(self):
""" Issue #671 """
called = []
@bottle.hook('after_request')
def after_request():
called.append('after')
self.assertEqual(400, bottle.response.status_code)
@bottle.get('/')
def _get():
called.append("route")
bottle.abort(400, 'test')
self.urlopen("/")
self.assertEqual(["route", "after"], called)
def test_after_request_hooks_run_after_exception(self):
""" Issue #671 """
called = []
@bottle.hook('before_request')
def before_request():
called.append('before')
@bottle.hook('after_request')
def after_request():
called.append('after')
@bottle.get('/')
def _get():
called.append("route")
1/0
self.urlopen("/")
self.assertEqual(["before", "route", "after"], called)
def test_after_request_hooks_run_after_exception_in_before_hook(self):
""" Issue #671 """
called = []
@bottle.hook('before_request')
def before_request():
called.append('before')
1 / 0
@bottle.hook('after_request')
def after_request():
called.append('after')
@bottle.get('/')
def _get():
called.append("route")
self.urlopen("/")
self.assertEqual(["before", "after"], called)
def test_after_request_hooks_may_rise_response_exception(self):
""" Issue #671 """
called = []
@bottle.hook('after_request')
def after_request():
called.append('after')
bottle.abort(400, "hook_content")
@bottle.get('/')
def _get():
called.append("route")
return "XXX"
self.assertInBody("hook_content", "/")
self.assertEqual(["route", "after"], called)
def test_after_response_hook_can_set_headers(self):
""" Issue #1125 """
@bottle.route()
def test1():
return "test"
@bottle.route()
def test2():
return HTTPResponse("test", 200)
@bottle.route()
def test3():
raise HTTPResponse("test", 200)
@bottle.hook('after_request')
def hook():
bottle.response.headers["X-Hook"] = 'works'
for route in ("/test1", "/test2", "/test3"):
self.assertBody('test', route)
self.assertHeader('X-Hook', 'works', route)
def test_template(self):
@bottle.route(template='test {{a}} {{b}}')
def test(): return dict(a=5, b=6)
self.assertBody('test 5 6', '/test')
def test_template_opts(self):
@bottle.route(template=('test {{a}} {{b}}', {'b': 6}))
def test(): return dict(a=5)
self.assertBody('test 5 6', '/test')
def test_name(self):
@bottle.route(name='foo')
def test(x=5): return 'ok'
self.assertEqual('/test/6', bottle.url('foo', x=6))
def test_callback(self):
def test(x=5): return str(x)
rv = bottle.route(callback=test)
self.assertBody('5', '/test')
self.assertBody('6', '/test/6')
self.assertEqual(rv, test)
| TestRouteDecorator |
python | pytorch__pytorch | torch/utils/mkldnn.py | {
"start": 42,
"end": 1308
} | class ____(torch.jit.ScriptModule):
def __init__(self, dense_module, dtype) -> None:
super().__init__()
self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))
if dense_module.bias is not None:
# Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy,
# we use fp32 dtype.
self.register_buffer('bias', dense_module.bias.to_mkldnn())
else:
# TODO: Remove this once ScriptModule supports registering None buffer
self.register_buffer(
'bias',
torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
@torch.jit.script_method
def __getstate__(self):
return (self.weight.to_dense(), self.bias.to_dense(), self.training)
@torch.jit.script_method
def __setstate__(self, state):
self.weight = state[0].to_mkldnn()
self.bias = state[1].to_mkldnn()
self.training = state[2]
@torch.jit.script_method
def forward(self, x):
x_mkldnn = x if x.is_mkldnn else x.to_mkldnn()
y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias)
y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense()
return y
| MkldnnLinear |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py | {
"start": 311,
"end": 5172
} | class ____(SetBasedColumnMapExpectation):
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py docstring">
"""Expect values in this column should be valid members of the Solfege scale: do, re, mi, etc."""
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py set">
set_ = [
"do",
"re",
"mi",
"fa",
"so",
"la",
"ti",
"Do",
"Re",
"Mi",
"Fa",
"So",
"La",
"Ti",
"DO",
"RE",
"MI",
"FA",
"SO",
"LA",
"TI",
]
set_camel_name = "SolfegeScale"
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py semantic_name">
set_semantic_name = "the Solfege scale"
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py examples">
examples = [
{
"data": {
"lowercase_solfege_scale": [
"do",
"re",
"mi",
"fa",
"so",
"la",
"ti",
"do",
],
"uppercase_solfege_scale": [
"DO",
"RE",
"MI",
"FA",
"SO",
"LA",
"TI",
"DO",
],
"mixed": ["do", "od", "re", "er", "mi", "im", "fa", "af"],
},
"only_for": ["pandas", "spark", "sqlite", "postgresql"],
"tests": [
{
"title": "positive_test_lowercase",
"exact_match_out": False,
"in": {"column": "lowercase_solfege_scale"},
"out": {
"success": True,
},
"include_in_gallery": True,
},
{
"title": "negative_test",
"exact_match_out": False,
"in": {"column": "mixed"},
"out": {
"success": False,
"unexpected_index_list": [1, 3, 5, 7],
},
"include_in_gallery": True,
},
{
"title": "postive_test_uppercase",
"exact_match_out": False,
"in": {"column": "uppercase_solfege_scale"},
"out": {
"success": True,
},
"include_in_gallery": True,
},
{
"title": "positive_test_with_mostly",
"exact_match_out": False,
"in": {"column": "mixed", "mostly": 0.4},
"out": {
"success": True,
"unexpected_index_list": [1, 3, 5, 7],
},
"include_in_gallery": True,
},
],
}
]
# </snippet>
map_metric = SetBasedColumnMapExpectation.register_metric(
set_camel_name=set_camel_name,
set_=set_,
)
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py library_metadata">
library_metadata = {
"tags": ["set-based"],
"contributors": ["@joegargery"],
}
# </snippet>
if __name__ == "__main__":
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_values_to_be_in_solfege_scale_set.py diagnostics">
ExpectColumnValuesToBeInSolfegeScaleSet(
column="lowercase_solfege_scale"
).print_diagnostic_checklist()
# </snippet>
# Note to users: code below this line is only for integration testing -- ignore!
diagnostics = ExpectColumnValuesToBeInSolfegeScaleSet(
column="lowercase_solfege_scale"
).run_diagnostics()
for check in diagnostics["tests"]:
assert check["test_passed"] is True
assert check["error_diagnostics"] is None
for check in diagnostics["errors"]:
assert check is None
for check in diagnostics["maturity_checklist"]["experimental"]:
if check["message"] == "Passes all linting checks":
continue
assert check["passed"] is True
| ExpectColumnValuesToBeInSolfegeScaleSet |
python | django__django | tests/test_client/tests.py | {
"start": 44066,
"end": 48515
} | class ____(SimpleTestCase):
"""Tests for the request factory."""
# A mapping between names of HTTP/1.1 methods and their test views.
http_methods_and_views = (
("get", get_view),
("post", post_view),
("put", _generic_view),
("patch", _generic_view),
("delete", _generic_view),
("head", _generic_view),
("options", _generic_view),
("trace", trace_view),
)
request_factory = RequestFactory()
def test_request_factory(self):
"""The request factory implements all the HTTP/1.1 methods."""
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method("/somewhere/")
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
"""
The request factory returns a templated response for a GET request.
"""
request = self.request_factory.get("/somewhere/")
response = get_view(request)
self.assertContains(response, "This is a test")
def test_trace_request_from_factory(self):
"""The request factory returns an echo response for a TRACE request."""
url_path = "/somewhere/"
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertContains(response, echoed_request_line)
def test_request_factory_default_headers(self):
request = RequestFactory(
headers={
"authorization": "Bearer faketoken",
"x-another-header": "some other value",
}
).get("/somewhere/")
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
request = RequestFactory(
headers={
"Authorization": "Bearer faketoken",
"X-Another-Header": "some other value",
}
).get("/somewhere/")
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
def test_request_factory_sets_headers(self):
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method(
"/somewhere/",
headers={
"authorization": "Bearer faketoken",
"x-another-header": "some other value",
},
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
request = method(
"/somewhere/",
headers={
"Authorization": "Bearer faketoken",
"X-Another-Header": "some other value",
},
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
def test_request_factory_query_params(self):
tests = (
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
)
for method in tests:
with self.subTest(method=method):
factory = getattr(self.request_factory, method)
request = factory("/somewhere", query_params={"example": "data"})
self.assertEqual(request.GET["example"], "data")
@override_settings(ROOT_URLCONF="test_client.urls")
| RequestFactoryTest |
python | django-extensions__django-extensions | tests/testapp/jobs/sample.py | {
"start": 56,
"end": 178
} | class ____(BaseJob):
help = "My sample job."
def execute(self):
# executing empty sample job
pass
| Job |
python | realpython__materials | python-312/typing/inspect_string.py | {
"start": 58,
"end": 446
} | class ____(str):
def __len__(self):
return len(self.split())
def inspect(text: S) -> S:
print(f"'{text.upper()}' has length {len(text)}")
return text
# %% Python 3.12
# class Words(str):
# def __len__(self):
# return len(self.split())
#
#
# def inspect[S: str](text: S) -> S:
# print(f"'{text.upper()}' has length {len(text)}")
# return text
| Words |
python | encode__django-rest-framework | tests/utils.py | {
"start": 923,
"end": 1662
} | class ____:
"""
When used as a lookup with a `MockQueryset`, these objects
will raise a `TypeError`, as occurs in Django when making
queryset lookups with an incorrect type for the lookup value.
"""
def __eq__(self):
raise TypeError()
def mock_reverse(view_name, args=None, kwargs=None, request=None, format=None):
args = args or []
kwargs = kwargs or {}
value = (args + list(kwargs.values()) + ['-'])[0]
prefix = 'http://example.org' if request else ''
suffix = ('.' + format) if (format is not None) else ''
return '%s/%s/%s%s/' % (prefix, view_name, value, suffix)
def fail_reverse(view_name, args=None, kwargs=None, request=None, format=None):
raise NoReverseMatch()
| BadType |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-webflow/source_webflow/source.py | {
"start": 6220,
"end": 10601
} | class ____(WebflowStream):
"""
This stream is used for pulling "items" out of a given Webflow collection. Because there is not a fixed number of collections with
pre-defined names, each stream is an object that uses the passed-in collection name for the stream name.
Note that because the Webflow API works with collection ids rather than collection names, the collection id is
used for hitting the Webflow API.
An example of a collection is "Blog Posts", which contains a list of items, where each item is a JSON-representation of a blog article.
"""
# primary_key is not used as we don't do incremental syncs - https://docs.airbyte.com/understanding-airbyte/connections/
primary_key = None
# only want to create the name to id lookup table once
def __init__(self, site_id: str = None, collection_id: str = None, collection_name: str = None, **kwargs):
"""override __init__ to add collection-related variables"""
self.site_id = site_id
super().__init__(**kwargs)
self.collection_name = collection_name
self.collection_id = collection_id
@property
def name(self) -> str:
return self.collection_name
def path(self, **kwargs) -> str:
"""
The path to get the "items" in the requested collection uses the "_id" of the collection in the URL.
See: https://developers.webflow.com/#items
return collections/<collection_id>/items
"""
path = f"collections/{self.collection_id}/items"
return path
def next_page_token(self, response: requests.Response) -> Mapping[str, Any]:
decoded_response = response.json()
if decoded_response.get("count", 0) != 0 and decoded_response.get("items", []) != []:
# Webflow uses an offset for pagination https://developers.webflow.com/#item-model
offset = decoded_response["offset"] + decoded_response["count"]
return {"offset": offset}
else:
return {}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
# Webflow default pagination is 100, for debugging pagination we set this to a low value.
# This should be set back to 100 for production
params = {"limit": 100}
# Handle pagination by inserting the next page's token in the request parameters
if next_page_token:
params.update(next_page_token)
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
"""
Webflow items API returns an array of items contained in the "items" field.
"""
response_json = response.json()
# The items API returns records inside a container list called "items"
for item in response_json["items"]:
yield item
def get_json_schema(self) -> Mapping[str, Any]:
"""
Webflow has an API,but it is not consistent with json-schema.org schemas. We use the CollectionSchema stream
to get these schemas and to also map them to json-schema format.
"""
collection_id = self.collection_id
schema_stream = CollectionSchema(authenticator=self._session.auth, collection_id=collection_id)
schema_records = schema_stream.read_records(sync_mode="full_refresh")
# each record corresponds to a property in the json schema. So we loop over each of these properties
# and add it to the json schema.
json_schema = {}
for schema_property in schema_records:
json_schema.update(schema_property)
# Manually add in _cid and _id, which are not included in the list of fields sent back from Webflow,
# but which are necessary for joining data in the database
extra_fields = {
"_id": {"type": ["null", "string"]},
"_cid": {"type": ["null", "string"]},
"_locale": {"type": ["null", "string"]},
}
json_schema.update(extra_fields)
return {
"$schema": "http://json-schema.org/draft-07/schema#",
"additionalProperties": True,
"type": "object",
"properties": json_schema,
}
| CollectionContents |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 5266,
"end": 5823
} | class ____(_OracleDateLiteralRender, sqltypes.DateTime):
"""Provide the Oracle Database DATE type.
This type has no special Python behavior, except that it subclasses
:class:`_types.DateTime`; this is to suit the fact that the Oracle Database
``DATE`` type supports a time value.
"""
__visit_name__ = "DATE"
def literal_processor(self, dialect):
return self._literal_processor_datetime(dialect)
def _compare_type_affinity(self, other):
return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
| DATE |
python | graphql-python__graphene | graphene/tests/issues/test_425.py | {
"start": 341,
"end": 1262
} | class ____(ObjectType):
@classmethod
def __init_subclass_with_meta__(cls, other_attr="default", **options):
_meta = SpecialOptions(cls)
_meta.other_attr = other_attr
super(SpecialObjectType, cls).__init_subclass_with_meta__(
_meta=_meta, **options
)
def test_special_objecttype_could_be_subclassed():
class MyType(SpecialObjectType):
class Meta:
other_attr = "yeah!"
assert MyType._meta.other_attr == "yeah!"
def test_special_objecttype_could_be_subclassed_default():
class MyType(SpecialObjectType):
pass
assert MyType._meta.other_attr == "default"
def test_special_objecttype_inherit_meta_options():
class MyType(SpecialObjectType):
pass
assert MyType._meta.name == "MyType"
assert MyType._meta.default_resolver is None
assert MyType._meta.interfaces == ()
# InputObjectType
| SpecialObjectType |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 21106,
"end": 22470
} | class ____(Module):
r"""Applies the Gaussian Error Linear Units function.
.. math:: \text{GELU}(x) = x * \Phi(x)
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
When the approximate argument is 'tanh', Gelu is estimated with:
.. math:: \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
Args:
approximate (str, optional): the gelu approximation algorithm to use:
``'none'`` | ``'tanh'``. Default: ``'none'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/GELU.png
Examples::
>>> m = nn.GELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["approximate"]
approximate: str
def __init__(self, approximate: str = "none") -> None:
super().__init__()
self.approximate = approximate
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.gelu(input, approximate=self.approximate)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"approximate={repr(self.approximate)}"
| GELU |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py | {
"start": 1011,
"end": 2227
} | class ____(test_util.TensorFlowTestCase):
def testDocStringExample(self):
splits = [0, 3, 3, 5, 6, 9]
expected = [0, 0, 0, 2, 2, 3, 4, 4, 4]
segment_ids = segment_id_ops.row_splits_to_segment_ids(splits)
self.assertAllEqual(segment_ids, expected)
def testEmptySplits(self):
# Note: the splits for an empty ragged tensor contains a single zero.
segment_ids = segment_id_ops.row_splits_to_segment_ids([0])
self.assertAllEqual(segment_ids, [])
def testErrors(self):
self.assertRaisesRegex(ValueError, r'Invalid row_splits: \[\]',
segment_id_ops.row_splits_to_segment_ids, [])
self.assertRaisesRegex(ValueError, r'splits must have dtype int32 or int64',
segment_id_ops.row_splits_to_segment_ids,
constant_op.constant([0.5]))
self.assertRaisesRegex(ValueError, r'Shape \(\) must have rank 1',
segment_id_ops.row_splits_to_segment_ids, 0)
self.assertRaisesRegex(ValueError, r'Shape \(1, 1\) must have rank 1',
segment_id_ops.row_splits_to_segment_ids, [[0]])
if __name__ == '__main__':
googletest.main()
| RaggedSplitsToSegmentIdsOpTest |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_connection_command.py | {
"start": 28402,
"end": 38405
} | class ____:
parser = cli_parser.get_parser()
def setup_method(self):
clear_db_connections(add_default_connections_back=False)
def test_cli_connections_import_should_return_error_if_file_does_not_exist(self, mocker):
mocker.patch("os.path.exists", return_value=False)
filepath = "/does/not/exist.json"
with pytest.raises(SystemExit, match=r"Missing connections file."):
connection_command.connections_import(self.parser.parse_args(["connections", "import", filepath]))
@pytest.mark.parametrize("filepath", ["sample.jso", "sample.environ"])
def test_cli_connections_import_should_return_error_if_file_format_is_invalid(self, filepath, mocker):
mocker.patch("os.path.exists", return_value=True)
with pytest.raises(
AirflowException,
match=(
"Unsupported file format. The file must have one of the following extensions: "
".env .json .yaml .yml"
),
):
connection_command.connections_import(self.parser.parse_args(["connections", "import", filepath]))
def test_cli_connections_import_should_load_connections(self, mocker):
# Sample connections to import
expected_connections = {
"new0": {
"conn_type": "postgres",
"description": "new0 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 5432,
"schema": "airflow",
"extra": '{"foo": "bar"}',
},
"new1": {
"conn_type": "mysql",
"description": "new1 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 3306,
"schema": "airflow",
"extra": '{"spam": "egg"}',
},
# Add new3 if the test expects an error about it
"new3": {
"conn_type": "sqlite",
"description": "new3 description",
"host": "host",
},
}
# First, create new3 to trigger the "already exists" error
with create_session() as session:
session.add(Connection(conn_id="new3", conn_type="sqlite"))
session.commit()
# We're not testing the behavior of _parse_secret_file
mocker.patch("airflow.secrets.local_filesystem._parse_secret_file", return_value=expected_connections)
mocker.patch("os.path.exists", return_value=True)
mock_print = mocker.patch("airflow.cli.commands.connection_command.print")
connection_command.connections_import(
self.parser.parse_args(["connections", "import", "sample.json"])
)
# Check all print calls to find the error message
print_calls = [str(call) for call in mock_print.call_args_list]
assert any("Could not import connection new3" in call for call in print_calls), (
f"Expected error message not found. Print calls: {print_calls}"
)
# Verify connections (exclude new3 since it should fail)
expected_imported = {k: v for k, v in expected_connections.items() if k != "new3"}
with create_session() as session:
current_conns = session.query(Connection).filter(Connection.conn_id.in_(["new0", "new1"])).all()
comparable_attrs = [
"conn_id",
"conn_type",
"description",
"host",
"login",
"password",
"port",
"schema",
"extra",
]
current_conns_as_dicts = {
current_conn.conn_id: {attr: getattr(current_conn, attr) for attr in comparable_attrs}
for current_conn in current_conns
}
assert expected_imported == current_conns_as_dicts
def test_cli_connections_import_should_not_overwrite_existing_connections(self, session, mocker):
mocker.patch("os.path.exists", return_value=True)
# Add a pre-existing connection "new3"
merge_conn(
Connection(
conn_id="new3",
conn_type="mysql",
description="original description",
host="mysql",
login="root",
password="password",
schema="airflow",
),
session=session,
)
# Sample connections to import, including a collision with "new3"
expected_connections = {
"new2": {
"conn_type": "postgres",
"description": "new2 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 5432,
"schema": "airflow",
"extra": '{"foo": "bar"}',
},
"new3": {
"conn_type": "mysql",
"description": "updated description",
"host": "host",
"login": "airflow",
"password": "new password",
"port": 3306,
"schema": "airflow",
"extra": '{"spam": "egg"}',
},
}
# We're not testing the behavior of _parse_secret_file, assume it successfully reads JSON, YAML or env
mocker.patch("airflow.secrets.local_filesystem._parse_secret_file", return_value=expected_connections)
mock_print = mocker.patch("airflow.cli.commands.connection_command.print")
connection_command.connections_import(
self.parser.parse_args(["connections", "import", "sample.json"])
)
assert "Could not import connection new3: connection already exists." in mock_print.call_args[0][0]
# Verify that the imported connections match the expected, sample connections
current_conns = session.query(Connection).all()
comparable_attrs = [
"conn_id",
"conn_type",
"description",
"host",
"login",
"password",
"port",
"schema",
"extra",
]
current_conns_as_dicts = {
current_conn.conn_id: {attr: getattr(current_conn, attr) for attr in comparable_attrs}
for current_conn in current_conns
}
assert current_conns_as_dicts["new2"] == expected_connections["new2"]
# The existing connection's description should not have changed
assert current_conns_as_dicts["new3"]["description"] == "original description"
def test_cli_connections_import_should_overwrite_existing_connections(self, mocker, session):
mocker.patch("os.path.exists", return_value=True)
mocker.patch(
"airflow.secrets.local_filesystem._parse_secret_file",
return_value={
"new2": {
"conn_type": "postgres",
"description": "new2 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 5432,
"schema": "airflow",
"extra": '{"foo": "bar"}',
},
"new3": {
"conn_type": "mysql",
"description": "updated description",
"host": "host",
"login": "airflow",
"password": "new password",
"port": 3306,
"schema": "airflow",
"extra": '{"spam": "egg"}',
},
},
)
# Add a pre-existing connection "new3"
merge_conn(
Connection(
conn_id="new3",
conn_type="mysql",
description="original description",
host="mysql",
login="root",
password="password",
schema="airflow",
),
session=session,
)
mock_print = mocker.patch("airflow.cli.commands.connection_command.print")
connection_command.connections_import(
self.parser.parse_args(["connections", "import", "sample.json", "--overwrite"])
)
assert (
"Could not import connection new3: connection already exists." not in mock_print.call_args[0][0]
)
# Verify that the imported connections match the expected, sample connections
current_conns = session.query(Connection).all()
comparable_attrs = [
"conn_id",
"conn_type",
"description",
"host",
"login",
"password",
"port",
"schema",
"extra",
]
current_conns_as_dicts = {
current_conn.conn_id: {attr: getattr(current_conn, attr) for attr in comparable_attrs}
for current_conn in current_conns
}
assert current_conns_as_dicts["new2"] == {
"conn_type": "postgres",
"description": "new2 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 5432,
"schema": "airflow",
"extra": '{"foo": "bar"}',
"conn_id": "new2",
}
# The existing connection should have been overwritten
assert current_conns_as_dicts["new3"] == {
"conn_type": "mysql",
"description": "updated description",
"host": "host",
"login": "airflow",
"password": "new password",
"port": 3306,
"schema": "airflow",
"extra": '{"spam": "egg"}',
"conn_id": "new3",
}
| TestCliImportConnections |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py | {
"start": 4377,
"end": 4432
} | class ____(PatchEmbed):
pass
| Qwen2_5_VisionPatchEmbed |
python | ray-project__ray | python/ray/tune/tests/test_env_callbacks.py | {
"start": 201,
"end": 3430
} | class ____(Callback):
pass
@pytest.mark.parametrize(
"env_value,expected_callback_count",
[
("my.module.Callback1", 1),
("module1.Callback1,module2.Callback2", 2),
("", 0),
(" ", 0),
("module.Callback1, ,module.Callback2", 2),
],
)
@patch("importlib.import_module")
def test_env_callbacks_loading(mock_import, env_value, expected_callback_count):
"""Test loading execution callbacks from environment variable with various inputs."""
if env_value:
with patch.dict(os.environ, {RAY_TUNE_CALLBACKS_ENV_VAR: env_value}):
mock_module = MagicMock()
mock_module.Callback1 = MockCallback
mock_module.Callback2 = MockCallback
mock_import.return_value = mock_module
callbacks = _initialize_env_callbacks()
assert len(callbacks) == expected_callback_count
for callback in callbacks:
assert isinstance(callback, MockCallback)
else:
with patch.dict(
os.environ, {RAY_TUNE_CALLBACKS_ENV_VAR: env_value}, clear=True
):
callbacks = _initialize_env_callbacks()
assert len(callbacks) == 0
@pytest.mark.parametrize(
"env_value,original_error_type",
[
("invalid_module", ValueError),
("module.Class", TypeError),
("module.NonExistentClass", AttributeError),
],
)
@patch("importlib.import_module")
def test_callback_loading_errors(mock_import, env_value, original_error_type):
"""Test handling of various error conditions when loading callbacks."""
with patch.dict(os.environ, {RAY_TUNE_CALLBACKS_ENV_VAR: env_value}):
if "invalid_module" in env_value:
pass
elif "NonExistentClass" in env_value:
mock_module = MagicMock()
del mock_module.NonExistentClass
mock_import.return_value = mock_module
else:
mock_module = MagicMock()
class RegularClass:
pass
mock_module.Class = RegularClass
mock_import.return_value = mock_module
with pytest.raises(
ValueError, match=f"Failed to import callback from '{env_value}'"
) as exc_info:
_initialize_env_callbacks()
assert isinstance(exc_info.value.__cause__, original_error_type)
def test_import_error_handling():
"""Test handling of import errors when loading callbacks."""
with patch.dict(
os.environ, {RAY_TUNE_CALLBACKS_ENV_VAR: "nonexistent.module.TestCallback"}
):
with pytest.raises(
ValueError,
match="Failed to import callback from 'nonexistent.module.TestCallback'",
) as exc_info:
_initialize_env_callbacks()
assert isinstance(exc_info.value.__cause__, ImportError)
def test_no_env_variable():
"""Test that no callbacks are loaded when environment variable is not set."""
if RAY_TUNE_CALLBACKS_ENV_VAR in os.environ:
del os.environ[RAY_TUNE_CALLBACKS_ENV_VAR]
callbacks = _initialize_env_callbacks()
assert len(callbacks) == 0
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| MockCallback |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 46102,
"end": 52431
} | class ____(LukePreTrainedModel):
_tied_weights_keys = {
"entity_predictions.decoder.weight": "luke.entity_embeddings.entity_embeddings.weight",
"lm_head.bias": "lm_head.decoder.bias",
}
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.lm_head = LukeLMHead(config)
self.entity_predictions = EntityPredictionHead(config)
self.loss_fn = nn.CrossEntropyLoss()
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.LongTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
entity_labels: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, LukeMaskedLMOutput]:
r"""
entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
Indices of entity tokens in the entity vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
- 1 for entity tokens that are **not masked**,
- 0 for entity tokens that are **masked**.
entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Segment token indices to indicate first and second portions of the entity token inputs. Indices are
selected in `[0, 1]`:
- 0 corresponds to a *portion A* entity token,
- 1 corresponds to a *portion B* entity token.
entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
loss = None
mlm_loss = None
logits = self.lm_head(outputs.last_hidden_state)
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))
if loss is None:
loss = mlm_loss
mep_loss = None
entity_logits = None
if outputs.entity_last_hidden_state is not None:
entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)
if entity_labels is not None:
mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))
if loss is None:
loss = mep_loss
else:
loss = loss + mep_loss
if not return_dict:
return tuple(
v
for v in [
loss,
mlm_loss,
mep_loss,
logits,
entity_logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
]
if v is not None
)
return LukeMaskedLMOutput(
loss=loss,
mlm_loss=mlm_loss,
mep_loss=mep_loss,
logits=logits,
entity_logits=entity_logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity
token) for entity classification tasks, such as Open Entity.
"""
)
| LukeForMaskedLM |
python | facelessuser__pymdown-extensions | pymdownx/blocks/tab.py | {
"start": 274,
"end": 3815
} | class ____(Treeprocessor):
"""Tab tree processor."""
def __init__(self, md, config):
"""Initialize."""
super().__init__(md)
self.alternate = config['alternate_style']
self.slugify = config['slugify']
self.combine_header_slug = config['combine_header_slug']
self.sep = config["separator"]
def get_parent_header_slug(self, root, header_map, parent_map, el):
"""Attempt retrieval of parent header slug."""
parent = el
last_parent = parent
while parent is not root:
last_parent = parent
parent = parent_map[parent]
if parent in header_map:
headers = header_map[parent]
header = None
for i in list(parent):
if i is el and header is None:
break
if i is last_parent and header is not None:
return header.attrib.get("id", '')
if i in headers:
header = i
return ''
def run(self, doc):
"""Update tab IDs."""
# Get a list of id attributes
used_ids = set()
parent_map = {}
header_map = {}
if self.combine_header_slug:
parent_map = {c: p for p in doc.iter() for c in p}
for el in doc.iter():
if "id" in el.attrib:
if self.combine_header_slug and el.tag in HEADERS:
parent = parent_map[el]
if parent in header_map:
header_map[parent].append(el)
else:
header_map[parent] = [el]
used_ids.add(el.attrib["id"])
for el in doc.iter():
if isinstance(el.tag, str) and el.tag.lower() == 'div':
classes = el.attrib.get('class', '').split()
if 'tabbed-set' in classes and (not self.alternate or 'tabbed-alternate' in classes):
inputs = []
labels = []
if self.alternate:
for i in list(el):
if i.tag == 'input':
inputs.append(i)
if i.tag == 'div' and i.attrib.get('class', '') == 'tabbed-labels':
labels = [j for j in list(i) if j.tag == 'label']
else:
for i in list(el):
if i.tag == 'input':
inputs.append(i)
if i.tag == 'label':
labels.append(i)
# Generate slugged IDs
for inpt, label in zip(inputs, labels):
innerhtml = toc.render_inner_html(toc.remove_fnrefs(label), self.md)
innertext = html.unescape(toc.strip_tags(innerhtml))
if self.combine_header_slug:
parent_slug = self.get_parent_header_slug(doc, header_map, parent_map, el)
else:
parent_slug = ''
slug = self.slugify(innertext, self.sep)
if parent_slug:
slug = parent_slug + self.sep + slug
slug = toc.unique(slug, used_ids)
inpt.attrib["id"] = slug
label.attrib["for"] = slug
| TabbedTreeprocessor |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 44087,
"end": 44981
} | class ____:
async def test_flow_run_tags_added_at_call(self, prefect_client):
@flow
def my_flow():
pass
with tags("a", "b"):
state = my_flow(return_state=True)
flow_run = await prefect_client.read_flow_run(state.state_details.flow_run_id)
assert set(flow_run.tags) == {"a", "b"}
async def test_flow_run_tags_added_to_subflows(self, prefect_client):
@flow
def my_flow():
with tags("c", "d"):
return my_subflow(return_state=True)
@flow
def my_subflow():
pass
with tags("a", "b"):
subflow_state = await my_flow(return_state=True).result()
flow_run = await prefect_client.read_flow_run(
subflow_state.state_details.flow_run_id
)
assert set(flow_run.tags) == {"a", "b", "c", "d"}
| TestFlowRunTags |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 5208,
"end": 5687
} | class ____:
def __init__(self, module_name, filename, c_filename):
self.name = module_name
self.filename = filename
self.c_filename = c_filename
self.globals = {}
# {cython_lineno: min(c_linenos)}
self.lineno_cy2c = {}
# {c_lineno: cython_lineno}
self.lineno_c2cy = {}
self.functions = {}
def __repr__(self):
return simple_repr(self, renamed={"module_name": "name"}, state=False)
| CythonModule |
python | streamlit__streamlit | lib/tests/streamlit/runtime/app_session_test.py | {
"start": 3453,
"end": 35487
} | class ____(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
mock_runtime = MagicMock(spec=Runtime)
mock_runtime.media_file_mgr = MediaFileManager(
MemoryMediaFileStorage("/mock/media")
)
mock_runtime.cache_storage_manager = MemoryCacheStorageManager()
Runtime._instance = mock_runtime
def tearDown(self) -> None:
super().tearDown()
Runtime._instance = None
@patch(
"streamlit.runtime.app_session.uuid.uuid4", MagicMock(return_value="some_uuid")
)
def test_generates_uuid_for_session_id_if_no_override(self):
session = _create_test_session()
assert session.id == "some_uuid"
def test_uses_session_id_override_if_set(self):
session = _create_test_session(session_id_override="some_custom_session_id")
assert session.id == "some_custom_session_id"
@patch(
"streamlit.runtime.app_session.secrets_singleton.file_change_listener.disconnect"
)
def test_shutdown(self, patched_disconnect):
"""Test that AppSession.shutdown behaves sanely."""
session = _create_test_session()
mock_file_mgr = MagicMock(spec=UploadedFileManager)
session._uploaded_file_mgr = mock_file_mgr
session.shutdown()
assert session._state == AppSessionState.SHUTDOWN_REQUESTED
mock_file_mgr.remove_session_files.assert_called_once_with(session.id)
patched_disconnect.assert_called_once_with(session._on_secrets_file_changed)
# A 2nd shutdown call should have no effect.
session.shutdown()
assert session._state == AppSessionState.SHUTDOWN_REQUESTED
mock_file_mgr.remove_session_files.assert_called_once_with(session.id)
def test_shutdown_with_running_scriptrunner(self):
"""If we have a running ScriptRunner, shutting down should stop it."""
session = _create_test_session()
mock_scriptrunner = MagicMock(spec=ScriptRunner)
session._scriptrunner = mock_scriptrunner
session.shutdown()
mock_scriptrunner.request_stop.assert_called_once()
mock_scriptrunner.reset_mock()
# A 2nd shutdown call should have no effect.
session.shutdown()
mock_scriptrunner.request_stop.assert_not_called()
def test_request_script_stop(self):
"""Verify that request_script_stop forwards the request to the scriptrunner."""
session = _create_test_session()
mock_scriptrunner = MagicMock(spec=ScriptRunner)
session._scriptrunner = mock_scriptrunner
session.request_script_stop()
mock_scriptrunner.request_stop.assert_called()
def test_request_script_stop_no_scriptrunner(self):
"""Test that calling request_script_stop when there is no scriptrunner doesn't
result in an error.
"""
session = _create_test_session()
session._scriptrunner = None
# Nothing else to do here aside from ensuring that no exception is thrown.
session.request_script_stop()
def test_unique_id(self):
"""Each AppSession should have a unique ID"""
session1 = _create_test_session()
session2 = _create_test_session()
assert session1.id != session2.id
def test_creates_session_state_on_init(self):
session = _create_test_session()
assert isinstance(session.session_state, SessionState)
def test_creates_fragment_storage_on_init(self):
session = _create_test_session()
# NOTE: We only call assertIsNotNone here because protocols can't be used with
# isinstance (there's no need to as the static type checker already ensures
# the field has the correct type), and we don't want to mark
# MemoryFragmentStorage as @runtime_checkable.
assert session._fragment_storage is not None
def test_clear_cache_resets_session_state(self):
session = _create_test_session()
session._session_state["foo"] = "bar"
session._handle_clear_cache_request()
assert "foo" not in session._session_state
@patch("streamlit.runtime.caching.cache_data.clear")
@patch("streamlit.runtime.caching.cache_resource.clear")
def test_clear_cache_all_caches(self, clear_resource_caches, clear_data_caches):
session = _create_test_session()
session._handle_clear_cache_request()
clear_resource_caches.assert_called_once()
clear_data_caches.assert_called_once()
@patch(
"streamlit.runtime.app_session.secrets_singleton.file_change_listener.connect"
)
def test_request_rerun_on_secrets_file_change(self, patched_connect):
"""AppSession should add a secrets listener on creation."""
session = _create_test_session()
patched_connect.assert_called_once_with(session._on_secrets_file_changed)
@patch_config_options({"runner.fastReruns": False})
@patch("streamlit.runtime.app_session.AppSession._create_scriptrunner")
def test_rerun_with_no_scriptrunner(self, mock_create_scriptrunner: MagicMock):
"""If we don't have a ScriptRunner, a rerun request will result in
one being created."""
session = _create_test_session()
session.request_rerun(None)
mock_create_scriptrunner.assert_called_once_with(RerunData())
@patch_config_options({"runner.fastReruns": False})
@patch("streamlit.runtime.app_session.AppSession._create_scriptrunner")
def test_rerun_with_active_scriptrunner(self, mock_create_scriptrunner: MagicMock):
"""If we have an active ScriptRunner, it receives rerun requests."""
session = _create_test_session()
mock_active_scriptrunner = MagicMock(spec=ScriptRunner)
mock_active_scriptrunner.request_rerun = MagicMock(return_value=True)
session._scriptrunner = mock_active_scriptrunner
session.request_rerun(None)
# The active ScriptRunner will accept the rerun request...
mock_active_scriptrunner.request_rerun.assert_called_once_with(RerunData())
# So _create_scriptrunner should not be called.
mock_create_scriptrunner.assert_not_called()
@patch_config_options({"runner.fastReruns": False})
@patch("streamlit.runtime.app_session.AppSession._create_scriptrunner")
def test_rerun_with_stopped_scriptrunner(self, mock_create_scriptrunner: MagicMock):
"""If have a ScriptRunner but it's shutting down and cannot handle
new rerun requests, we'll create a new ScriptRunner."""
session = _create_test_session()
mock_stopped_scriptrunner = MagicMock(spec=ScriptRunner)
mock_stopped_scriptrunner.request_rerun = MagicMock(return_value=False)
session._scriptrunner = mock_stopped_scriptrunner
session.request_rerun(None)
# The stopped ScriptRunner will reject the request...
mock_stopped_scriptrunner.request_rerun.assert_called_once_with(RerunData())
# So we'll create a new ScriptRunner.
mock_create_scriptrunner.assert_called_once_with(RerunData())
@patch_config_options({"runner.fastReruns": True})
@patch("streamlit.runtime.app_session.AppSession._create_scriptrunner")
def test_fast_rerun(self, mock_create_scriptrunner: MagicMock):
"""If runner.fastReruns is enabled, a rerun request will stop the
existing ScriptRunner and immediately create a new one.
"""
session = _create_test_session()
mock_active_scriptrunner = MagicMock(spec=ScriptRunner)
session._scriptrunner = mock_active_scriptrunner
session.request_rerun(None)
# The active ScriptRunner should be shut down.
mock_active_scriptrunner.request_rerun.assert_not_called()
mock_active_scriptrunner.request_stop.assert_called_once()
# And a new ScriptRunner should be created.
mock_create_scriptrunner.assert_called_once()
@patch_config_options({"runner.fastReruns": True})
@patch("streamlit.runtime.app_session.AppSession._create_scriptrunner")
def test_rerun_fragment_requests_existing_scriptrunner(
self, mock_create_scriptrunner: MagicMock
):
session = _create_test_session()
fragment_id = "my_fragment_id"
session._fragment_storage.set(fragment_id, lambda: None)
mock_active_scriptrunner = MagicMock(spec=ScriptRunner)
session._scriptrunner = mock_active_scriptrunner
session.request_rerun(ClientState(fragment_id=fragment_id))
# The active ScriptRunner should *not* be shut down or stopped.
mock_active_scriptrunner.request_rerun.assert_called_once()
mock_active_scriptrunner.request_stop.assert_not_called()
# And a new ScriptRunner should *not* be created.
mock_create_scriptrunner.assert_not_called()
@patch_config_options({"runner.fastReruns": True})
@patch("streamlit.runtime.app_session.AppSession._create_scriptrunner")
def test_rerun_fragment_does_not_request_existing_scriptrunner_when_not_existing(
self, mock_create_scriptrunner: MagicMock
):
"""In case the fragment was removed by a preceding full app run, we want to exit
early and not request a rerun on the existing ScriptRunner.
"""
session = _create_test_session()
fragment_id = "my_fragment_id"
# leaving the following code line in to show that the fragment id
# is not set in the fragment storage!
# session._fragment_storage.set(fragment_id, lambda: None) # noqa: ERA001
mock_active_scriptrunner = MagicMock(spec=ScriptRunner)
session._scriptrunner = mock_active_scriptrunner
session.request_rerun(ClientState(fragment_id=fragment_id))
# The active ScriptRunner should *not* be requested at all.
mock_active_scriptrunner.request_rerun.assert_not_called()
mock_active_scriptrunner.request_stop.assert_not_called()
# And a new ScriptRunner should *not* be created.
mock_create_scriptrunner.assert_not_called()
@patch("streamlit.runtime.app_session.ScriptRunner")
def test_create_scriptrunner(self, mock_scriptrunner: MagicMock):
"""Test that _create_scriptrunner does what it should."""
session = _create_test_session()
assert session._scriptrunner is None
session._create_scriptrunner(initial_rerun_data=RerunData())
# Assert that the ScriptRunner constructor was called.
mock_scriptrunner.assert_called_once_with(
session_id=session.id,
main_script_path=session._script_data.main_script_path,
session_state=session._session_state,
uploaded_file_mgr=session._uploaded_file_mgr,
script_cache=session._script_cache,
initial_rerun_data=RerunData(),
user_info={"email": "test@example.com"},
fragment_storage=session._fragment_storage,
pages_manager=session._pages_manager,
)
assert session._scriptrunner is not None
# And that the ScriptRunner was initialized and started.
scriptrunner: MagicMock = cast("MagicMock", session._scriptrunner)
scriptrunner.on_event.connect.assert_called_once_with(
session._on_scriptrunner_event
)
scriptrunner.start.assert_called_once()
@patch("streamlit.runtime.app_session.ScriptRunner", MagicMock(spec=ScriptRunner))
@patch("streamlit.runtime.app_session.AppSession._enqueue_forward_msg")
def test_ignore_events_from_noncurrent_scriptrunner(self, mock_enqueue: MagicMock):
"""If we receive ScriptRunnerEvents from anything other than our
current ScriptRunner, we should silently ignore them.
"""
session = _create_test_session()
session._create_scriptrunner(initial_rerun_data=RerunData())
# Our test AppSession is created with a mock EventLoop, so
# we pretend that this function is called on that same mock EventLoop.
with patch(
"streamlit.runtime.app_session.asyncio.get_running_loop",
return_value=session._event_loop,
):
session._handle_scriptrunner_event_on_event_loop(
sender=session._scriptrunner,
event=ScriptRunnerEvent.ENQUEUE_FORWARD_MSG,
forward_msg=ForwardMsg(),
)
mock_enqueue.assert_called_once_with(ForwardMsg())
mock_enqueue.reset_mock()
non_current_scriptrunner = MagicMock(spec=ScriptRunner)
session._handle_scriptrunner_event_on_event_loop(
sender=non_current_scriptrunner,
event=ScriptRunnerEvent.ENQUEUE_FORWARD_MSG,
forward_msg=ForwardMsg(),
)
mock_enqueue.assert_not_called()
@patch("streamlit.runtime.app_session.ScriptRunner", MagicMock(spec=ScriptRunner))
@patch("streamlit.runtime.app_session.AppSession._enqueue_forward_msg", MagicMock())
def test_resets_debug_last_backmsg_id_on_script_finished(self):
session = _create_test_session()
session._create_scriptrunner(initial_rerun_data=RerunData())
session._debug_last_backmsg_id = "some_backmsg_id"
with patch(
"streamlit.runtime.app_session.asyncio.get_running_loop",
return_value=session._event_loop,
):
session._handle_scriptrunner_event_on_event_loop(
sender=session._scriptrunner,
event=ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
forward_msg=ForwardMsg(),
)
assert session._debug_last_backmsg_id is None
@patch("streamlit.runtime.app_session.ScriptRunner", MagicMock(spec=ScriptRunner))
@patch("streamlit.runtime.app_session.AppSession._enqueue_forward_msg", MagicMock())
def test_sets_state_to_not_running_on_rerun_event(self):
session = _create_test_session()
session._create_scriptrunner(initial_rerun_data=RerunData())
session._state = AppSessionState.APP_IS_RUNNING
with patch(
"streamlit.runtime.app_session.asyncio.get_running_loop",
return_value=session._event_loop,
):
session._handle_scriptrunner_event_on_event_loop(
sender=session._scriptrunner,
event=ScriptRunnerEvent.SCRIPT_STOPPED_FOR_RERUN,
forward_msg=ForwardMsg(),
)
assert session._state == AppSessionState.APP_NOT_RUNNING
def test_passes_client_state_on_run_on_save(self):
session = _create_test_session()
session._run_on_save = True
session.request_rerun = MagicMock()
session._on_source_file_changed()
session._script_cache.clear.assert_called_once()
session.request_rerun.assert_called_once_with(session._client_state)
@patch(
"streamlit.runtime.app_session.AppSession._should_rerun_on_file_change",
MagicMock(return_value=False),
)
def test_does_not_rerun_if_not_current_page(self):
session = _create_test_session()
session._run_on_save = True
session.request_rerun = MagicMock()
session._on_source_file_changed("/fake/script_path.py")
# Clearing the cache should still have been called
session._script_cache.clear.assert_called_once()
assert not session.request_rerun.called
@patch.object(
PagesManager,
"get_pages",
MagicMock(
return_value={
"hash1": {"page_name": "page_1", "icon": "", "script_path": "script1"},
"hash2": {
"page_name": "page_2",
"icon": "🎉",
"script_path": "script2",
},
}
),
)
def test_tags_fwd_msgs_with_last_backmsg_id_if_set(self):
session = _create_test_session()
session._debug_last_backmsg_id = "some backmsg id"
msg = ForwardMsg()
session._enqueue_forward_msg(msg)
assert msg.debug_last_backmsg_id == "some backmsg id"
@patch("streamlit.runtime.app_session.config.on_config_parsed")
@patch(
"streamlit.runtime.app_session.secrets_singleton.file_change_listener.connect"
)
@patch.object(
PagesManager,
"get_pages",
MagicMock(return_value={}),
)
def test_registers_file_watchers(
self,
patched_secrets_connect,
patched_on_config_parsed,
):
session = _create_test_session()
session._local_sources_watcher.register_file_change_callback.assert_called_once_with(
session._on_source_file_changed
)
patched_on_config_parsed.assert_called_once_with(
session._on_source_file_changed, force_connect=True
)
patched_secrets_connect.assert_called_once_with(
session._on_secrets_file_changed
)
@patch.object(
PagesManager,
"get_pages",
MagicMock(return_value={}),
)
def test_recreates_local_sources_watcher_if_none(self):
session = _create_test_session()
session._local_sources_watcher = None
session.register_file_watchers()
assert session._local_sources_watcher
@patch_config_options({"server.fileWatcherType": "none"})
def test_no_local_sources_watcher_if_file_watching_disabled(self):
session = _create_test_session()
assert not session._local_sources_watcher
@patch(
"streamlit.runtime.app_session.secrets_singleton.file_change_listener.disconnect"
)
def test_disconnect_file_watchers(self, patched_secrets_disconnect):
session = _create_test_session()
with (
patch.object(
session._local_sources_watcher, "close"
) as patched_close_local_sources_watcher,
patch.object(
session, "_stop_config_listener"
) as patched_stop_config_listener,
patch.object(
session, "_stop_pages_listener"
) as patched_stop_pages_listener,
):
session.disconnect_file_watchers()
patched_close_local_sources_watcher.assert_called_once()
patched_stop_config_listener.assert_called_once()
patched_stop_pages_listener.assert_called_once()
patched_secrets_disconnect.assert_called_once_with(
session._on_secrets_file_changed
)
assert session._local_sources_watcher is None
assert session._stop_config_listener is None
assert session._stop_pages_listener is None
def test_disconnect_file_watchers_removes_refs(self):
"""Test that calling disconnect_file_watchers on the AppSession
removes references to it so it is eligible to be garbage collected after the
method is called.
"""
session = _create_test_session()
# Various listeners should have references to session file/pages/secrets changed
# handlers.
assert len(gc.get_referrers(session)) > 0
session.disconnect_file_watchers()
# Run the gc to ensure that we don't count refs to session from an object that
# would have been garbage collected along with the session. We run the gc a few
# times for good measure as otherwise we've previously seen weirdness in CI
# where this test would fail for certain Python versions (exact reasons
# unknown), so it seems like the first gc sweep may not always pick up the
# session.
gc.collect(2)
gc.collect(2)
gc.collect(2)
assert len(gc.get_referrers(session)) == 0
@patch("streamlit.runtime.app_session.AppSession._enqueue_forward_msg")
def test_handle_file_urls_request(self, mock_enqueue):
session = _create_test_session()
upload_file_urls = [
UploadFileUrlInfo(
file_id="file_1",
upload_url="upload_file_url_1",
delete_url="delete_file_url_1",
),
UploadFileUrlInfo(
file_id="file_2",
upload_url="upload_file_url_2",
delete_url="delete_file_url_2",
),
UploadFileUrlInfo(
file_id="file_3",
upload_url="upload_file_url_3",
delete_url="delete_file_url_3",
),
]
session._uploaded_file_mgr.get_upload_urls.return_value = upload_file_urls
session._handle_file_urls_request(
FileURLsRequest(
request_id="my_id",
file_names=["file_1", "file_2", "file_3"],
session_id=session.id,
)
)
session._uploaded_file_mgr.get_upload_urls.assert_called_once_with(
session.id, ["file_1", "file_2", "file_3"]
)
expected_msg = ForwardMsg(
file_urls_response=FileURLsResponse(
response_id="my_id",
file_urls=[
FileURLs(
file_id=url.file_id,
upload_url=url.upload_url,
delete_url=url.delete_url,
)
for url in upload_file_urls
],
)
)
mock_enqueue.assert_called_once_with(expected_msg)
def test_manual_rerun_preserves_context_info(self):
"""Test that manual reruns preserve context info."""
session = _create_test_session()
# Create a client state with context info (simulating a manual rerun from frontend)
client_state = ClientState()
client_state.context_info.timezone = "Europe/Berlin"
client_state.context_info.locale = "de-DE"
client_state.query_string = "test_query"
client_state.page_script_hash = "test_hash"
client_state.is_auto_rerun = False
session._create_scriptrunner = MagicMock()
session.request_rerun(client_state)
# Verify that _create_scriptrunner was called
session._create_scriptrunner.assert_called_once()
# Get the RerunData that was passed to _create_scriptrunner
rerun_data = session._create_scriptrunner.call_args[0][0]
# Verify that context_info was preserved
assert rerun_data.context_info is not None
assert rerun_data.context_info.timezone == "Europe/Berlin"
assert rerun_data.context_info.locale == "de-DE"
assert rerun_data.query_string == "test_query"
assert rerun_data.page_script_hash == "test_hash"
assert rerun_data.is_auto_rerun is False
def test_context_info_preserved_in_client_state_on_shutdown(self):
"""Test that context_info is preserved in client_state during SHUTDOWN event."""
session = _create_test_session()
# Set up initial context info in client state
session._client_state.context_info.timezone = "America/New_York"
session._client_state.context_info.locale = "en-US"
session._client_state.query_string = "initial_query"
session._client_state.page_script_hash = "initial_hash"
# Create a mock ScriptRunner and simulate SHUTDOWN event
mock_scriptrunner = MagicMock(spec=ScriptRunner)
session._scriptrunner = mock_scriptrunner
# Create client state with context info (as would be sent in SHUTDOWN event)
shutdown_client_state = ClientState()
shutdown_client_state.context_info.timezone = "Europe/London"
shutdown_client_state.context_info.locale = "en-GB"
shutdown_client_state.query_string = "shutdown_query"
shutdown_client_state.page_script_hash = "shutdown_hash"
with patch(
"streamlit.runtime.app_session.asyncio.get_running_loop",
return_value=session._event_loop,
):
session._handle_scriptrunner_event_on_event_loop(
sender=mock_scriptrunner,
event=ScriptRunnerEvent.SHUTDOWN,
client_state=shutdown_client_state,
)
# Verify that the client state was updated with the shutdown data
assert session._client_state.context_info.timezone == "Europe/London"
assert session._client_state.context_info.locale == "en-GB"
assert session._client_state.query_string == "shutdown_query"
assert session._client_state.page_script_hash == "shutdown_hash"
def _mock_get_options_for_section(
overrides: dict[str, Any] | None = None,
) -> Callable[..., Any]:
"""Mock config.get_options_for_section for testing.
Expected override structure:
{
"sidebar": {...}, # Options for theme.sidebar
"light": { # Options for theme.light
"sidebar": {...}, # Options for theme.light.sidebar
...other options...
},
"dark": { # Options for theme.dark
"sidebar": {...}, # Options for theme.dark.sidebar
...other options...
},
...other theme options...
}
"""
if not overrides:
overrides = {}
# Default options for sections (excluding main theme which has unique options like base)
section_default_opts = {
"backgroundColor": "white",
"baseRadius": "1.2rem",
"buttonRadius": "medium",
"borderColor": "#ff0000",
"dataframeBorderColor": "#280f63",
"codeFont": "Monaspace Argon",
"codeFontSize": "12px",
"codeFontWeight": 500,
"font": "Inter",
"headingFont": "Inter Bold",
"headingFontSizes": ["2.125rem", "2rem", "1.875rem"],
"headingFontWeights": [700, 700, 600],
"linkColor": "#2EC163",
"linkUnderline": False,
"primaryColor": "red",
"secondaryBackgroundColor": "blue",
"showWidgetBorder": True,
"textColor": "black",
"codeBackgroundColor": "blue",
"dataframeHeaderBackgroundColor": "purple",
"redColor": "red",
"orangeColor": "orange",
"yellowColor": "yellow",
"blueColor": "blue",
"greenColor": "green",
"violetColor": "violet",
"grayColor": "gray",
"redBackgroundColor": "#ff8c8c",
"orangeBackgroundColor": "#ffd16a",
"yellowBackgroundColor": "#ffff59",
"blueBackgroundColor": "#60b4ff",
"greenBackgroundColor": "#5ce488",
"violetBackgroundColor": "#b27eff",
"grayBackgroundColor": "#bfc5d3",
"redTextColor": "#ffabab",
"orangeTextColor": "#ffe08e",
"yellowTextColor": "#ffff7d",
"blueTextColor": "#83c9ff",
"greenTextColor": "#7defa1",
"violetTextColor": "#c89dff",
"grayTextColor": "#d5dae5",
"codeTextColor": "#7defa1",
}
# Main theme options (includes unique options like base, baseFontSize, etc.)
theme_default_opts = {
"backgroundColor": "white",
"base": "dark",
"baseFontSize": 14,
"baseFontWeight": 300,
"baseRadius": "1.2rem",
"buttonRadius": "medium",
"borderColor": "#ff0000",
"dataframeBorderColor": "#280f63",
"codeFont": "Monaspace Argon",
"codeFontSize": "12px",
"codeFontWeight": 300,
"headingFontSizes": [
"2.875rem",
"2.75rem",
"2rem",
"1.75rem",
"1.5rem",
"1.25rem",
],
"headingFontWeights": [700, 700, 600, 600],
"font": "Inter",
"fontFaces": [
{
"family": "Inter Bold",
"url": "https://raw.githubusercontent.com/rsms/inter/refs/heads/master/docs/font-files/Inter-Bold.woff2",
},
{
"family": "Inter",
"url": "https://raw.githubusercontent.com/rsms/inter/refs/heads/master/docs/font-files/Inter-Regular.woff2",
"weight": 400,
},
{
"family": "Monaspace Argon",
"url": "https://raw.githubusercontent.com/githubnext/monaspace/refs/heads/main/fonts/webfonts/MonaspaceArgon-Regular.woff2",
"weight": 400,
},
],
"headingFont": "Inter Bold",
"linkColor": "#2EC163",
"linkUnderline": False,
"primaryColor": "coral",
"secondaryBackgroundColor": "blue",
"showWidgetBorder": True,
"showSidebarBorder": True,
"textColor": "black",
"codeTextColor": "#09ab3b",
"codeBackgroundColor": "blue",
"dataframeHeaderBackgroundColor": "purple",
"chartCategoricalColors": [
"#7fc97f",
"#beaed4",
"#fdc086",
"#ffff99",
"#386cb0",
"#f0027f",
"#bf5b17",
"#666666",
],
"chartSequentialColors": [
"#dffde9",
"#c0fcd3",
"#9ef6bb",
"#7defa1",
"#5ce488",
"#3dd56d",
"#21c354",
"#09ab3b",
"#158237",
"#177233",
],
"redColor": "#7d353b",
"orangeColor": "#d95a00",
"yellowColor": "#916e10",
"blueColor": "#004280",
"greenColor": "#177233",
"violetColor": "#3f3163",
"grayColor": "#0e1117",
"redBackgroundColor": "#ff4b4b",
"orangeBackgroundColor": "#ffa421",
"yellowBackgroundColor": "#ffe312",
"blueBackgroundColor": "#1c83e1",
"greenBackgroundColor": "#21c354",
"violetBackgroundColor": "#803df5",
"grayBackgroundColor": "#808495",
"redTextColor": "#ffabab",
"orangeTextColor": "#ffe08e",
"yellowTextColor": "#ffff7d",
"blueTextColor": "#83c9ff",
"greenTextColor": "#7defa1",
"violetTextColor": "#c89dff",
"grayTextColor": "#d5dae5",
}
def _apply_overrides(base: dict, overrides_dict: dict, exclude_keys: set) -> dict:
"""Apply overrides to base dict, excluding specified keys."""
result = base.copy()
for k, v in overrides_dict.items():
if k not in exclude_keys:
result[k] = v
return result
def get_options_for_section(section: str) -> dict:
if section == "theme":
# Apply root-level overrides, excluding nested sections
return _apply_overrides(
theme_default_opts, overrides, {"sidebar", "light", "dark"}
)
if section == "theme.sidebar":
# Apply sidebar overrides if present
return _apply_overrides(
section_default_opts, overrides.get("sidebar", {}), set()
)
if section == "theme.light":
# Apply light overrides, excluding nested sidebar
return _apply_overrides(
section_default_opts, overrides.get("light", {}), {"sidebar"}
)
if section == "theme.dark":
# Apply dark overrides, excluding nested sidebar
return _apply_overrides(
section_default_opts, overrides.get("dark", {}), {"sidebar"}
)
if section == "theme.light.sidebar":
# Apply light.sidebar overrides if present
light_sidebar = overrides.get("light", {}).get("sidebar", {})
return _apply_overrides(section_default_opts, light_sidebar, set())
if section == "theme.dark.sidebar":
# Apply dark.sidebar overrides if present
dark_sidebar = overrides.get("dark", {}).get("sidebar", {})
return _apply_overrides(section_default_opts, dark_sidebar, set())
# Fallback to real config for any other sections
return config.get_options_for_section(section)
return get_options_for_section
| AppSessionTest |
python | numba__numba | numba/core/bytecode.py | {
"start": 9253,
"end": 13032
} | class ____(object):
"""
The decoded bytecode of a function, and related information.
"""
__slots__ = ('func_id', 'co_names', 'co_varnames', 'co_consts',
'co_cellvars', 'co_freevars', 'exception_entries',
'table', 'labels')
def __init__(self, func_id):
code = func_id.code
labels = set(x + _FIXED_OFFSET for x in dis.findlabels(code.co_code))
labels.add(0)
# A map of {offset: ByteCodeInst}
table = OrderedDict(ByteCodeIter(code))
self._compute_lineno(table, code)
self.func_id = func_id
self.co_names = code.co_names
self.co_varnames = code.co_varnames
self.co_consts = code.co_consts
self.co_cellvars = code.co_cellvars
self.co_freevars = code.co_freevars
self.table = table
self.labels = sorted(labels)
@classmethod
def _compute_lineno(cls, table, code):
"""
Compute the line numbers for all bytecode instructions.
"""
for offset, lineno in dis.findlinestarts(code):
adj_offset = offset + _FIXED_OFFSET
if adj_offset in table:
table[adj_offset].lineno = lineno
# Assign unfilled lineno
# Start with first bytecode's lineno
known = code.co_firstlineno
for inst in table.values():
if inst.lineno is not None and inst.lineno >= 0:
known = inst.lineno
else:
inst.lineno = known
return table
def __iter__(self):
return iter(self.table.values())
def __getitem__(self, offset):
return self.table[offset]
def __contains__(self, offset):
return offset in self.table
def dump(self):
def label_marker(i):
if i[1].offset in self.labels:
return '>'
else:
return ' '
return '\n'.join('%s %10s\t%s' % ((label_marker(i),) + i)
for i in self.table.items()
if i[1].opname != "CACHE")
@classmethod
def _compute_used_globals(cls, func, table, co_consts, co_names):
"""
Compute the globals used by the function with the given
bytecode table.
"""
d = {}
globs = func.__globals__
builtins = globs.get('__builtins__', utils.builtins)
if isinstance(builtins, ModuleType):
builtins = builtins.__dict__
# Look for LOAD_GLOBALs in the bytecode
for inst in table.values():
if inst.opname == 'LOAD_GLOBAL':
name = co_names[_fix_LOAD_GLOBAL_arg(inst.arg)]
if name not in d:
try:
value = globs[name]
except KeyError:
value = builtins[name]
d[name] = value
# Add globals used by any nested code object
for co in co_consts:
if isinstance(co, CodeType):
subtable = OrderedDict(ByteCodeIter(co))
d.update(cls._compute_used_globals(func, subtable,
co.co_consts, co.co_names))
return d
def get_used_globals(self):
"""
Get a {name: value} map of the globals used by this code
object and any nested code objects.
"""
return self._compute_used_globals(self.func_id.func, self.table,
self.co_consts, self.co_names)
def _fix_LOAD_GLOBAL_arg(arg):
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
return arg >> 1
elif PYVERSION in ((3, 10),):
return arg
else:
raise NotImplementedError(PYVERSION)
| _ByteCode |
python | google__jax | tests/checkify_test.py | {
"start": 30870,
"end": 43582
} | class ____(jtu.JaxTestCase):
def test_assert_primitive_impl(self):
def f():
checkify.check(False, "hi")
with self.assertRaisesRegex(JaxRuntimeError, "hi"):
f()
def test_assert_primitive_lowering(self):
@jax.jit
def f():
checkify.check(False, "hi")
with self.assertRaisesRegex(ValueError, "Cannot abstractly evaluate"):
f()
def test_assert_primitive_jaxpr_effects(self):
def f(x):
checkify.check(False, "hi: {}", x)
jaxpr = jax.make_jaxpr(f)(jnp.ones(4, jnp.int32))
self.assertSetEqual(jaxpr.effects,
{ErrorEffect(FailedCheckError, (
jax.ShapeDtypeStruct((4,), jnp.int32),))})
def g(x, y):
checkify.check(False, "hi: {} {}", x, y)
self.assertSetEqual(
jax.make_jaxpr(g)(
jnp.ones(4, jnp.int32), jnp.ones(2, jnp.float32)).effects,
{ErrorEffect(FailedCheckError, (
jax.ShapeDtypeStruct((4,), jnp.int32),
jax.ShapeDtypeStruct((2,), jnp.float32)))})
def test_assert_primitive_eval_shape(self):
# The check is abstractly evaluated but not lowered.
def f():
checkify.check(False, "hi")
jax.eval_shape(f) # does not crash.
def test_assert_discharging(self):
@checkify.checkify
def f(x):
checkify.check(x > 0, "must be positive!")
return jnp.log(x)
err, _ = f(1.)
self.assertIsNone(err.get())
err, _ = f(0.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "must be positive")
f = jax.jit(f)
err, _ = f(1.)
self.assertIsNone(err.get())
err, _ = f(0.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "must be positive")
def test_assert_discharging_no_data_dependence(self):
@jax.jit
def g(x):
@checkify.checkify
def f():
# Note that x is not an argument to the checkified function.
checkify.check(x > 0, "must be positive!")
return jnp.log(x)
return f()
err, _ = g(1.)
self.assertIsNone(err.get())
err, _ = g(0.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "must be positive")
def test_assert_discharging_scan(self):
def body(carry, x):
checkify.check(jnp.all(x > 0), "must be positive")
return carry, x
def f(x):
return jax.lax.scan(body, (None,), x)
err, _ = checkify.checkify(f)(jnp.array([-1]))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "must be positive")
err, _ = checkify.checkify(f)(jnp.array([1, 0, -1]))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "must be positive")
def test_assert_discharging_while_loop(self):
def while_cond(val):
i, _ = val
checkify.check(i < 0, "i must be negative")
return i < 2
def while_body(val):
i, x = val
checkify.check(x < 0, "x must be negative")
return i+1., x+1
@jax.jit
def f(init_i, init_val):
return lax.while_loop(while_cond, while_body, (init_i, init_val))
checked_f = checkify.checkify(f)
err, _ = checked_f(0, 1)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "i must be negative")
err, _ = checked_f(-1, 0)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "x must be negative")
def test_assert_discharging_cond(self):
def true_branch(x):
checkify.check(jnp.all(x != 0.), "x cannot be 0")
return 1/x
def false_branch(x):
checkify.check(jnp.all(x >= 0), "x must be positive")
return x*2
@jax.jit
def f(pred, x):
return lax.cond(pred, true_branch, false_branch, x)
checked_f = checkify.checkify(f)
err, _ = checked_f(True, 0.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "x cannot be 0")
err, _ = checked_f(False, 0.)
self.assertIsNone(err.get())
err, _ = checked_f(False, -1.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "x must be positive")
err, _ = checked_f(True, -1.)
self.assertIsNone(err.get())
def test_assert_batching_rule(self):
@jax.vmap
def f(x):
checkify.check(jnp.sum(x) == 1., "x must sum to one.")
return x
no_failures = jnp.array([[0.5, 0.5], [1., 0.]])
one_batch_fails = jnp.array([[0.5, 0.5], [1, 1]])
mult_batch_fail = jnp.array([[0.5, 0.5], [1, 1], [2, 2]])
f(no_failures)
with self.assertRaisesRegex(JaxRuntimeError, "x must sum to one."):
f(one_batch_fails)
with self.assertRaisesRegex(JaxRuntimeError, "x must sum to one."):
f(mult_batch_fail)
checked_f = checkify.checkify(f)
err, _ = checked_f(no_failures)
self.assertIsNone(err.get())
err, _ = checked_f(one_batch_fails)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "x must sum to one")
err, _ = checked_f(mult_batch_fail)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "x must sum to one")
def test_check_error(self):
def g():
checkify.check(False, "hi")
def f():
err, _ = checkify.checkify(g)()
checkify.check_error(err)
with self.assertRaisesRegex(JaxRuntimeError, "hi"):
f()
f = checkify.checkify(f)
err, none = f()
self.assertIsNone(none)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "hi")
def test_check_error_scanned(self):
def body(carry, x):
checkify.check(jnp.all(x > 0), "should be positive")
return carry, x
def checked_body(carry, x):
err, (carry, x) = checkify.checkify(body)(carry, x)
return carry, (x, err)
def f(x):
_, (xs, errs) = jax.lax.scan(checked_body, (None,), x)
checkify.check_error(errs)
return xs
err, _ = checkify.checkify(f)(jnp.array([-1]))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "should be positive")
err, _ = checkify.checkify(f)(jnp.array([1, 0, -1]))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "should be positive")
def test_discharge_recharge(self):
def ejit(f):
f = checkify.checkify(f)
f = jax.jit(f)
def jitted_f(*args):
err, out = f(*args)
checkify.check_error(err)
return out
return jitted_f
@ejit
def f(pred):
assert python_should_be_running
checkify.check(pred, "foo")
python_should_be_running = True
f(True)
python_should_be_running = False
f(True)
with self.assertRaisesRegex(JaxRuntimeError, "foo"):
f(False)
def test_cond_of_named_call(self):
def g(x):
branch = jax.named_call(lambda x: x)
out = jax.lax.cond(True, branch, branch, x)
return out
checkify.checkify(g)(0.) # does not crash
def test_grad(self):
@jax.grad
def f(x):
checkify.check(jnp.all(x > 0), "should be positive!")
return x
f = checkify.checkify(f)
err, _ = f(1.)
self.assertIsNone(err.get())
err, _ = f(0.)
self.assertIsNotNone(err.get())
self.assertIn("should be positive", err.get())
def test_checkify_of_vmap_of_while_errors(self):
@jax.vmap
def fun(n, v):
def while_cond(s):
counter, value = s
checkify.check(value < 6, "value needs to be less than 6!")
return counter > 0
def while_body(s):
counter, value = s
checkify.check(value >= 0, "value needs to be positive!")
return counter/value, value - 1.
_, result = jax.lax.while_loop(while_cond, while_body, (n, v))
return result
checked_f = checkify.checkify(fun, errors=checkify.all_checks)
with self.assertRaisesRegex(ValueError, "checkify-of-vmap-of-while"):
checked_f(jnp.asarray([1., 2., 3.]), jnp.asarray([5., 2., 4.]))
# TODO(lenamartens): re-enable assertions below.
# self.assertIsNotNone(err.get())
# self.assertStartsWith(err.get(), "division by zero")
# err, _ = checked_f(jnp.asarray([1., 2., 3.]), jnp.asarray([5., 2., -4.]))
# self.assertIsNotNone(err.get())
# self.assertStartsWith(err.get(), "value needs to be positive")
# err, _ = checked_f(jnp.asarray([1., 2., 3.]), jnp.asarray([6., 2., -4.]))
# self.assertIsNotNone(err.get())
# self.assertStartsWith(err.get(), "value needs to be less than 6")
def test_checkify_of_vmap_of_while_masked_errors(self):
def cond(x):
return x < 5
def body(x):
# This will only trigger in the masked portion of the batched while.
checkify.check(x < 5, "should never happen")
return x + 1
@jax.vmap
def fun(x):
return lax.while_loop(cond, body, x)
checked_f = checkify.checkify(fun)
with self.assertRaisesRegex(ValueError, "checkify-of-vmap-of-while"):
checked_f(jnp.arange(5))
# TODO(lenamartens): re-enable assertions below.
# self.assertIsNone(err.get())
def test_assert_cond_no_data_dependence(self):
def true_fun():
return checkify.check(False, "hi!")
def false_fun():
return checkify.check(False, "bye!")
def f():
return jax.lax.cond(True, true_fun, false_fun)
f = checkify.checkify(f)
err, _ = f()
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "hi!")
def test_assert_switch_no_data_dependence(self):
def branch():
checkify.check(False, "hi!")
def f():
return lax.switch(0, [branch]*3)
checked_f = checkify.checkify(f)
err, _ = checked_f()
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "hi!")
def test_debug_check_noop(self):
def f(x):
checkify.debug_check(jnp.all(x != x), "{x} cannot be {x}", x=x)
return x
x = jnp.ones(())
f(x) # no error.
jax.jit(f)(x) # no error.
jax.vmap(f)(jnp.ones((2,))) # no error.
jax.grad(f)(x) # no error.
@parameterized.named_parameters(("with_jit", True), ("without_jit", False))
def test_debug_check_nonscalar_pred(self, with_jit):
def f(x):
checkify.debug_check(x != x, "{x} cannot be {x}", x=x)
return x
checked_f = checkify.checkify(f)
if with_jit:
checked_f = jax.jit(checked_f)
with self.assertRaisesRegex(TypeError, "debug_check takes a scalar pred"):
checked_f(jnp.ones((5,)))
@parameterized.named_parameters(("with_jit", True), ("without_jit", False))
def test_debug_check(self, with_jit):
def f(x):
checkify.debug_check(jnp.all(x != x), "{x} cannot be {x}", x=x)
return x
checked_f = checkify.checkify(f)
if with_jit:
checked_f = jax.jit(checked_f)
err, _ = checked_f(jnp.ones(()))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "1.0 cannot be 1.0")
@parameterized.named_parameters(("with_jit", True), ("without_jit", False))
def test_debug_check_disabled_errors(self, with_jit):
def f(x):
checkify.debug_check(jnp.all(x != x), "{x} cannot be {x}", x=x)
return x
checked_f = checkify.checkify(f, errors={})
if with_jit:
checked_f = jax.jit(checked_f)
err, _ = checked_f(jnp.ones((1,)))
self.assertIsNone(err.get())
def test_debug_check_jaxpr_roundtrip(self):
def f(x):
checkify.debug_check(jnp.all(x != x), "{x} cannot be {x}", x=x)
return x
x = jnp.ones(())
jaxpr = jax.make_jaxpr(f)(x)
roundtrip_f = partial(core.eval_jaxpr, jaxpr.jaxpr, jaxpr.consts)
checked_f = checkify.checkify(jax.jit(roundtrip_f))
err, _ = checked_f(jnp.ones(()))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "1.0 cannot be 1.0")
def test_fmt_args_array_type_error(self):
args_error = lambda: checkify.check(False, "{} world", "hello")
with self.assertRaisesRegex(TypeError, "Formatting arguments"):
checkify.checkify(args_error)()
kwargs_error = lambda: checkify.check(False, "{hello} world", hello="hello")
with self.assertRaisesRegex(TypeError, "Formatting arguments"):
checkify.checkify(kwargs_error)()
np_arrays_ok = lambda: checkify.check(False, "{} world", np.array(1.))
checkify.checkify(np_arrays_ok)()
trees_ok = lambda: checkify.check(False, "{}", {"hello": jnp.array(1.)})
checkify.checkify(trees_ok)()
def test_checkify_non_jax_type_input(self):
_ = checkify.checkify(lambda x: 1.)("hi") # does not crash
def test_checkify_static_args(self):
@checkify.checkify
def f(x):
if x:
return
_ = jax.jit(f, static_argnums=(0,))(True)
def test_check_pp_rule(self):
jaxpr = jax.make_jaxpr(lambda: checkify.check(False, "hi"))()
jaxpr.pretty_print(source_info=True, name_stack=True) # Does not crash.
| AssertPrimitiveTests |
python | eth-brownie__brownie | brownie/utils/docopt.py | {
"start": 25129,
"end": 29221
} | class ____(NamedTuple):
before_usage: str
usage_header: str
usage_body: str
after_usage: str
def _parse_docstring_sections(docstring: str) -> _DocSections:
"""Partition the docstring into the main sections.
The docstring is returned, split into a tuple of 4 pieces: text before the
usage section, the usage section header, the usage section body and text
following the usage section.
"""
usage_pattern = r"""
# Any number of lines (that don't include usage:) precede the usage section
\A(?P<before_usage>(?:(?!.*\busage:).*\n)*)
# The `usage:` section header.
^(?P<usage_header>.*\busage:)
(?P<usage_body>
# The first line of the body may follow the header without a line break:
(?:.*(?:\n|\Z))
# Any number of additional indented lines
(?:[ \t].*(?:\n|\Z))*
)
# Everything else
(?P<after_usage>(?:.|\n)*)\Z
"""
match = regex_match(usage_pattern, docstring, flags=re.M | re.I | re.VERBOSE)
if not match:
raise DocoptLanguageError(
'Failed to parse doc: "usage:" section (case-insensitive) not found. '
"Check http://docopt.org/ for examples of how your doc should look."
)
before, header, body, after = match.groups()
return _DocSections(before, header, body, after)
def _parse_options(docstring: str) -> list[_Option]:
"""Parse the option descriptions from the help text.
`docstring` is the sub-section of the overall docstring that option
descriptions should be parsed from. It must not contain the "usage:"
section, as wrapped lines in the usage pattern can be misinterpreted as
option descriptions.
Option descriptions appear below the usage patterns, They define synonymous
long and short options, options that have arguments, and the default values
of options' arguments. They look like this:
```
-v, --verbose Be more verbose
-n COUNT, --number COUNT The number of times to
do the thing [default: 42]
```
"""
option_start = r"""
# Option descriptions begin on a new line
^
# They may occur on the same line as an options: section heading
(?:.*options:)?
# They can be indented with whitespace
[ \t]*
# The description itself starts with the short or long flag (-x or --xxx)
(-\S)
"""
parts = re.split(option_start, docstring, flags=re.M | re.I | re.VERBOSE)[1:]
return list(map(_Option.parse, map("".join, zip(parts[::2], parts[1::2]))))
def _lint_docstring(sections: _DocSections):
"""Report apparent mistakes in the docstring format."""
if re.search("options:", sections.usage_body, flags=re.I):
raise DocoptLanguageError(
'Failed to parse docstring: "options:" (case-insensitive) was '
'found in "usage:" section. Use a blank line after the usage, or '
"start the next section without leading whitespace."
)
if re.search("usage:", sections.usage_body + sections.after_usage, flags=re.I):
raise DocoptLanguageError(
'Failed to parse docstring: More than one "usage:" ' "(case-insensitive) section found."
)
if sections.usage_body.strip() == "":
raise DocoptLanguageError(
'Failed to parse docstring: "usage:" section is empty.'
"Check http://docopt.org/ for examples of how your doc should look."
)
def _formal_usage(usage: str) -> str:
program_name, *tokens = usage.split()
return "( " + " ".join(") | (" if s == program_name else s for s in tokens) + " )"
def _extras(default_help: bool, version: None, options: list[_Pattern], docstring: str) -> None:
if default_help and any(
(o.name in ("-h", "--help")) and o.value for o in options if isinstance(o, _Option)
):
print(docstring.strip("\n"))
sys.exit()
if version and any(
o.name == "--version" and o.value for o in options if isinstance(o, _Option)
):
print(version)
sys.exit()
| _DocSections |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 306499,
"end": 308497
} | class ____(Request):
"""
Get the list of task configuration items names
:param tasks: Task IDs
:type tasks: Sequence[str]
:param skip_empty: If set to 'true' then the names for configurations with
missing values are not returned
:type skip_empty: bool
"""
_service = "tasks"
_action = "get_configuration_names"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"skip_empty": {
"default": True,
"description": "If set to 'true' then the names for configurations with missing values are not returned",
"type": "boolean",
},
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], skip_empty: Optional[bool] = True, **kwargs: Any) -> None:
super(GetConfigurationNamesRequest, self).__init__(**kwargs)
self.tasks = tasks
self.skip_empty = skip_empty
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("skip_empty")
def skip_empty(self) -> Optional[bool]:
return self._property_skip_empty
@skip_empty.setter
def skip_empty(self, value: Optional[bool]) -> None:
if value is None:
self._property_skip_empty = None
return
self.assert_isinstance(value, "skip_empty", (bool,))
self._property_skip_empty = value
| GetConfigurationNamesRequest |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 8185,
"end": 51299
} | class ____(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.max_seqlen = config.max_position_embeddings
self.seed = seed
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.num_random_blocks = config.num_random_blocks
self.block_size = config.block_size
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
def forward(
self,
hidden_states,
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
output_attentions=None,
):
# Currently this `class` can't be used in decoder.
batch_size, seqlen, _ = hidden_states.size()
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = self.block_size
if from_seq_length % from_block_size != 0:
raise ValueError("Query sided sequence length must be multiple of block size")
if to_seq_length % to_block_size != 0:
raise ValueError("Key/Value sided sequence length must be multiple of block size")
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
context_layer, attention_probs = self.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
self.num_attention_heads,
self.num_random_blocks,
self.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=self.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=output_attentions,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
return context_layer, attention_probs
@staticmethod
def torch_bmm_nd(inp_1, inp_2, ndim=None):
"""Fast nd matrix multiplication"""
# faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
)
@staticmethod
def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
"""Fast nd matrix multiplication with transpose"""
# faster replacement of torch.einsum (bhqd,bhkd->bhqk)
return torch.bmm(
inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
def bigbird_block_sparse_attention(
self,
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
n_heads,
n_rand_blocks,
attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_len,
to_seq_len,
seed,
plan_from_length,
plan_num_rand_blocks,
output_attentions,
):
# BigBirdPegasus block-sparse attention as suggested in paper
# ITC:
# global tokens: 2 x block_size
# window tokens: 3 x block_size
# random tokens: num_rand_tokens x block_size
# ETC:
# global tokens: extra_globals_tokens + 2 x block_size
# window tokens: 3 x block_size
# random tokens: num_rand_tokens x block_size
# Note:
# 1) Currently, ETC is not supported.
# 2) Window size is fixed to 3 blocks & it can be changed only by
# changing `block_size`.
# 3) Number of global blocks are fixed (2 blocks here) & global tokens can be
# controlled only by `block_size`.
# attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)
# hence following code can be divided into 5 parts.
if from_seq_len // from_block_size != to_seq_len // to_block_size:
raise ValueError("Error the number of blocks needs to be same!")
rsqrt_d = 1 / math.sqrt(attention_head_size)
bsz = batch_size
attn_mask_penalty = -10000.0
# generate random attention and corresponding masks
np.random.seed(seed)
if from_seq_len in [1024, 3072, 4096]: # old plans used in paper
rand_attn = [
self._bigbird_block_rand_mask(
self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
)[: (from_seq_len // from_block_size - 2)]
for _ in range(n_heads)
]
else:
if plan_from_length is None:
plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
from_seq_len, from_block_size, n_rand_blocks
)
rand_attn = self._bigbird_block_rand_mask_with_head(
from_seq_length=from_seq_len,
to_seq_length=to_seq_len,
from_block_size=from_block_size,
to_block_size=to_block_size,
num_heads=n_heads,
plan_from_length=plan_from_length,
plan_num_rand_blocks=plan_num_rand_blocks,
)
rand_attn = np.stack(rand_attn, axis=0)
rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
rand_attn.unsqueeze_(0)
rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
rand_mask = self._create_rand_mask_from_inputs(
from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
)
blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
# preparing block for randn attn
gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
gathered_key = gathered_key.view(
bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
gathered_value = gathered_value.view(
bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
# 1st PART
# 1st block (global block) attention scores
# q[0] x (k[0], k[1], k[2], k[3], k[4] .... )
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
first_product = first_product * rsqrt_d
first_product += (1.0 - to_mask) * attn_mask_penalty
first_attn_weights = nn.functional.softmax(
first_product, dim=-1
) # [bsz, n_heads, from_block_size, to_seq_len]
# [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
first_context_layer.unsqueeze_(2)
# 2nd PART
# 2nd block attention scores
# q[1] x (sliding_keys, random_keys, global_keys)
# sliding key blocks -> 2nd, 3rd blocks
# global key blocks -> 1st block
second_key_mat = torch.cat(
[
blocked_key_matrix[:, :, 0],
blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2],
blocked_key_matrix[:, :, -1],
gathered_key[:, :, 0],
],
dim=2,
) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
second_value_mat = torch.cat(
[
blocked_value_matrix[:, :, 0],
blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2],
blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0],
],
dim=2,
) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
second_seq_pad = torch.cat(
[
to_mask[:, :, :, : 3 * to_block_size],
to_mask[:, :, :, -to_block_size:],
to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
],
dim=3,
)
second_rand_pad = torch.cat(
[
rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
rand_mask[:, :, 0],
],
dim=3,
)
second_product = second_product * rsqrt_d
second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty
second_attn_weights = nn.functional.softmax(
second_product, dim=-1
) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
# [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
second_context_layer.unsqueeze_(2)
# 3rd PART
# Middle blocks attention scores
# q[-2:2] x (sliding_keys, random_keys, global_keys)
# sliding attn is calculated using special trick of shifting tokens as discussed in paper
# random keys are generated by taking random indices as per `rand_attn`
# global keys -> 1st & last block
exp_blocked_key_matrix = torch.cat(
[blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
exp_blocked_value_matrix = torch.cat(
[blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
dim=3,
) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
# sliding attention scores for q[-2:2]
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
inner_band_product = inner_band_product * rsqrt_d
# randn attention scores for q[-2:2]
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
rand_band_product = rand_band_product * rsqrt_d
# Including 1st block (since it's global)
first_band_product = torch.einsum(
"bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
first_band_product = first_band_product * rsqrt_d
# Including last block (since it's global)
last_band_product = torch.einsum(
"bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
last_band_product = last_band_product * rsqrt_d
# masking padded tokens
inner_band_product += (1.0 - band_mask) * attn_mask_penalty
first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty
last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty
rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty
# completing attention scores matrix for all q[-2:2]
band_product = torch.cat(
[first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
# safely doing softmax since attention matrix is completed
attn_weights = nn.functional.softmax(
band_product, dim=-1
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
# contribution of sliding keys
# [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
context_layer = self.torch_bmm_nd(
attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# adding contribution of random keys
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
context_layer += self.torch_bmm_nd(
attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# adding contribution of global keys
context_layer += torch.einsum(
"bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
context_layer += torch.einsum(
"bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# 4th PART
# last 2nd token attention scores
# q[-2] x (sliding_keys, random_keys, global_keys)
# sliding key blocks -> last 3 blocks
# global key block -> 1st block
# random key block -> based on indices stored in `randn_attn`
second_last_key_mat = torch.cat(
[
blocked_key_matrix[:, :, 0],
blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2],
blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1],
],
dim=2,
) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
second_last_value_mat = torch.cat(
[
blocked_value_matrix[:, :, 0],
blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2],
blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1],
],
dim=2,
) # [bsz, n_heads, (4+r)*to_block_size, -1]
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
second_last_seq_pad = torch.cat(
[
to_mask[:, :, :, :to_block_size],
to_mask[:, :, :, -3 * to_block_size :],
to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
],
dim=3,
)
second_last_rand_pad = torch.cat(
[
rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
rand_mask[:, :, -1],
],
dim=3,
)
second_last_product = second_last_product * rsqrt_d
second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty
second_last_attn_weights = nn.functional.softmax(
second_last_product, dim=-1
) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
# [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
second_last_context_layer.unsqueeze_(2)
# 5th PART
# last block (global) attention scores
# q[-1] x (k[0], k[1], k[2], k[3], .... )
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
last_product = last_product * rsqrt_d
last_product += (1.0 - to_mask) * attn_mask_penalty
last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]
# [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
last_context_layer.unsqueeze_(2)
# combining representations of all tokens
context_layer = torch.cat(
[first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
dim=2,
)
context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
context_layer = torch.transpose(context_layer, 1, 2)
# this is just for visualizing; forward pass doesn't depend on following code
if output_attentions:
# TODO(PVP): need to verify if below code is correct
attention_probs = torch.zeros(
bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
)
# 1st query block
# corresponding to `first_context_layer`
attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global
# 2nd query block
# corresponding to `second_context_layer`
attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
:, :, :, : 3 * to_block_size
] # 1st three key blocks (global + sliding)
attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
:, :, :, 3 * to_block_size : 4 * to_block_size
] # last key block (global)
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[:, 4 * to_block_size :]
attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# Middle query blocks
# corresponding to `context_layer`
# sliding keys
for q_idx in range(from_seq_len // from_block_size - 4):
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)[:, :, 2:-2, :, 1:-1, :]
right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
bsz, n_heads, from_block_size, 3, to_block_size
) # inner_band_product
# global keys (corresponding to 1st key block)
attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
:, :, :, :, :to_block_size
].view(bsz, n_heads, -1, to_block_size) # first_band_product
# global keys (corresponding to last key block)
attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
:, :, :, :, -to_block_size:
].view(bsz, n_heads, -1, to_block_size) # last_band_product
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
for q_idx in range(1, len(i2) - 1):
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# Second-last query block
# corresponding to `second_last_context_layer`
attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
:, :, :, :to_block_size
] # 1st key block (global)
attention_probs[:, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :] = (
second_last_attn_weights[:, :, :, to_block_size : 4 * to_block_size]
) # last three blocks (global + sliding)
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[:, 4 * to_block_size :]
attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# last query block
# corresponding to `last_context_layer`
attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global
else:
attention_probs = None
return context_layer, attention_probs
@staticmethod
def torch_gather_b2(params, indices):
if params.shape[:2] != indices.shape[:2]:
raise ValueError(
"Make sure that the first two dimensions of params and indices are identical, but"
f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}"
)
num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
num_indices_to_pick_from = params.shape[2]
shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode="floor") * num_indices_to_pick_from
flattened_indices = indices.view(-1) + indices_shift
flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
out_flattened = flattened_params.index_select(0, flattened_indices)
out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
return out
@staticmethod
def _create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
):
"""
Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
@staticmethod
def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
"""
Gives the plan of where to put random attention.
Args:
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
num_rand_blocks: int. Number of random chunks per row.
Returns:
plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for
each block
"""
plan_from_length = []
plan_num_rand_blocks = []
if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks // 2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
def _bigbird_block_rand_mask(
self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
):
"""
Create adjacency list of random attention.
Args:
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
num_rand_blocks: int. Number of random chunks per row.
last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
if positive then num_rand_blocks blocks chosen only up to last_idx.
Returns:
adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
"""
# using this method when from_seq_length in [1024, 3072, 4096]
if from_seq_length // from_block_size != to_seq_length // to_block_size:
raise ValueError("Error the number of blocks needs to be same!")
rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
# During inference (eval) no randomness
if not self.training:
return rand_attn
middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
last = to_seq_length // to_block_size - 1
if last_idx > (2 * to_block_size):
last = (last_idx // to_block_size) - 1
r = num_rand_blocks # shorthand
for i in range(1, from_seq_length // from_block_size - 1):
start = i - 2
end = i
if i == 1:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
elif i == 2:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
elif i == from_seq_length // from_block_size - 3:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
# Missing -3: should have been sliced till last-3
elif i == from_seq_length // from_block_size - 2:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
# Missing -4: should have been sliced till last-4
else:
if start > last:
start = last
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
elif (end + 1) == last:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
else:
rand_attn[i - 1, :] = np.random.permutation(
np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
)[:r]
return rand_attn
    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        num_heads,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """
        Create adjacency list of random attention, one table per attention head.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_heads: int. total number of heads.
            plan_from_length: list. plan from length where num_random_blocks are chosen from.
            plan_num_rand_blocks: list. number of rand blocks within the plan.
            window_block_left: int. number of blocks of window to left of a block.
            window_block_right: int. number of blocks of window to right of a block.
            global_block_top: int. number of blocks at the top.
            global_block_bottom: int. number of blocks at the bottom.
            global_block_left: int. Number of blocks globally used to the left.
            global_block_right: int. Number of blocks globally used to the right.

        Returns:
            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by
            num_rand_blocks
        """
        # using this method when from_seq_length not in [1024, 3072, 4096]
        if from_seq_length // from_block_size != to_seq_length // to_block_size:
            raise ValueError("Error the number of blocks needs to be same!")
        if from_seq_length not in plan_from_length:
            raise ValueError("Error from sequence length not in plan!")
        # Total number of blocks in the mask
        num_blocks = from_seq_length // from_block_size
        # Number of blocks per plan
        plan_block_length = np.array(plan_from_length) // from_block_size
        # till when to follow plan
        max_plan_idx = plan_from_length.index(from_seq_length)
        # Random Attention adjacency list: one (num_blocks x total_rand_blocks) table per head
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(num_heads)
        ]
        # During inference (eval) no randomness: return the all-zero tables, trimmed of the
        # global top/bottom rows exactly like the training path does at the end.
        if not self.training:
            for nh in range(num_heads):
                rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
            return rand_attn
        # We will go iteratively over the plan blocks and pick random number of
        # Attention blocks from the legally allowed blocks
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # set the row for all from_blocks starting from 0 to
                # plan_block_length[plan_idx-1]
                # column indx start from plan_block_length[plan_idx-1] and ends at
                # plan_block_length[plan_idx]
                if plan_num_rand_blocks[plan_idx] > 0:
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
                # Back-fill earlier plan segments' columns for the rows of this new segment.
                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            # Fill the current segment's own columns for the rows it covers.
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]
            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(num_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
                        num_rand_blocks=plan_num_rand_blocks[plan_idx],
                        window_block_left=window_block_left,
                        window_block_right=window_block_right,
                        global_block_left=global_block_left,
                        global_block_right=global_block_right,
                    )
        # Drop the rows of the global top/bottom blocks: they attend everywhere anyway.
        for nh in range(num_heads):
            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
        return rand_attn
@staticmethod
def _get_single_block_row_attention(
block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1,
):
"""
For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention column start id.
to_end_block_id: int. random attention column end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
"""
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# illegal blocks for the current block id, using window
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blocks = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blocks.append(perm_block[i])
if len(selected_random_blocks) == num_rand_blocks:
break
return np.array(selected_random_blocks, dtype=np.int32)
| BigBirdPegasusBlockSparseAttention |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor28.py | {
"start": 706,
"end": 979
} | class ____(Generic[S]):
def __new__(cls, item: S) -> "ClassC[S]": ...
def __call__(self, obj: Any) -> S: ...
def func3(func1: Callable[..., T], func2: Callable[..., T]) -> T: ...
x2 = func3(ClassC(""), ClassC(1))
reveal_type(x2, expected_text="str | int")
| ClassC |
python | ionelmc__pytest-benchmark | tests/test_elasticsearch_storage.py | {
"start": 1527,
"end": 1801
} | class ____(ElasticsearchStorage):
def __init__(self):
self._es = mock.Mock(spec=elasticsearch.Elasticsearch)
self._es_hosts = self._es_index = self._es_doctype = 'mocked'
self.logger = logger
self.default_machine_id = 'FoobarOS'
| MockStorage |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_organization_seer_explorer_runs.py | {
"start": 8751,
"end": 11783
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-seer-explorer-runs"
def setUp(self) -> None:
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.url = reverse(self.endpoint, args=[self.organization.slug])
self.login_as(user=self.user)
def test_missing_gen_ai_features_flag(self) -> None:
with self.feature({"organizations:seer-explorer": True}):
with patch(
"sentry.seer.endpoints.organization_seer_explorer_runs.SeerExplorerClient",
side_effect=SeerPermissionError("Feature flag not enabled"),
):
response = self.client.get(self.url)
assert response.status_code == 403
assert response.data == {"detail": "Feature flag not enabled"}
def test_missing_seer_explorer_flag(self) -> None:
with self.feature({"organizations:gen-ai-features": True}):
with patch(
"sentry.seer.endpoints.organization_seer_explorer_runs.SeerExplorerClient",
side_effect=SeerPermissionError("Feature flag not enabled"),
):
response = self.client.get(self.url)
assert response.status_code == 403
assert response.data == {"detail": "Feature flag not enabled"}
def test_missing_seer_acknowledgement(self) -> None:
with self.feature(
{"organizations:gen-ai-features": True, "organizations:seer-explorer": True}
):
with patch(
"sentry.seer.endpoints.organization_seer_explorer_runs.SeerExplorerClient",
side_effect=SeerPermissionError(
"Seer has not been acknowledged by the organization."
),
):
response = self.client.get(self.url)
assert response.status_code == 403
assert response.data == {
"detail": "Seer has not been acknowledged by the organization."
}
def test_missing_allow_joinleave_org_flag(self) -> None:
with self.feature(
{"organizations:gen-ai-features": True, "organizations:seer-explorer": True}
):
with patch(
"sentry.seer.endpoints.organization_seer_explorer_runs.SeerExplorerClient",
side_effect=SeerPermissionError(
"Organization does not have open team membership enabled. Seer requires this to aggregate context across all projects and allow members to ask questions freely."
),
):
response = self.client.get(self.url)
assert response.status_code == 403
assert response.data == {
"detail": "Organization does not have open team membership enabled. Seer requires this to aggregate context across all projects and allow members to ask questions freely."
}
| TestOrganizationSeerExplorerRunsEndpointFeatureFlags |
python | graphql-python__graphene | examples/starwars_relay/schema.py | {
"start": 471,
"end": 1030
} | class ____(graphene.ObjectType):
"""A faction in the Star Wars saga"""
class Meta:
interfaces = (relay.Node,)
name = graphene.String(description="The name of the faction.")
ships = relay.ConnectionField(
ShipConnection, description="The ships used by the faction."
)
def resolve_ships(self, info, **args):
# Transform the instance ship_ids into real instances
return [get_ship(ship_id) for ship_id in self.ships]
@classmethod
def get_node(cls, info, id):
return get_faction(id)
| Faction |
python | ethereum__web3.py | tests/integration/go_ethereum/common.py | {
"start": 4099,
"end": 4164
} | class ____(AsyncNetModuleTest):
pass
| GoEthereumAsyncNetModuleTest |
python | ray-project__ray | python/ray/autoscaler/_private/aws/node_provider.py | {
"start": 3214,
"end": 28326
} | class ____(NodeProvider):
max_terminate_nodes = 1000
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes", True)
aws_credentials = provider_config.get("aws_credentials")
self.ec2 = make_ec2_resource(
region=provider_config["region"],
max_retries=BOTO_MAX_RETRIES,
aws_credentials=aws_credentials,
)
self.ec2_fail_fast = make_ec2_resource(
region=provider_config["region"],
max_retries=0,
aws_credentials=aws_credentials,
)
# Tags that we believe to actually be on EC2.
self.tag_cache = {}
# Tags that we will soon upload.
self.tag_cache_pending = defaultdict(dict)
# Number of threads waiting for a batched tag update.
self.batch_thread_count = 0
self.batch_update_done = threading.Event()
self.batch_update_done.set()
self.ready_for_new_batch = threading.Event()
self.ready_for_new_batch.set()
self.tag_cache_lock = threading.Lock()
self.count_lock = threading.Lock()
# Prevent concurrent create_node calls to get the same stopped/stopping node to reuse.
self._reuse_node_lock = threading.Lock()
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
def non_terminated_nodes(self, tag_filters):
# Note that these filters are acceptable because they are set on
# node initialization, and so can never be sitting in the cache.
tag_filters = to_aws_format(tag_filters)
filters = [
{
"Name": "instance-state-name",
"Values": ["pending", "running"],
},
{
"Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
"Values": [self.cluster_name],
},
]
for k, v in tag_filters.items():
filters.append(
{
"Name": "tag:{}".format(k),
"Values": [v],
}
)
with boto_exception_handler("Failed to fetch running instances from AWS."):
nodes = list(self.ec2.instances.filter(Filters=filters))
# Populate the tag cache with initial information if necessary
for node in nodes:
if node.id in self.tag_cache:
continue
self.tag_cache[node.id] = from_aws_format(
{x["Key"]: x["Value"] for x in node.tags}
)
self.cached_nodes = {node.id: node for node in nodes}
return [node.id for node in nodes]
def is_running(self, node_id):
node = self._get_cached_node(node_id)
return node.state["Name"] == "running"
def is_terminated(self, node_id):
node = self._get_cached_node(node_id)
state = node.state["Name"]
return state not in ["running", "pending"]
def node_tags(self, node_id):
with self.tag_cache_lock:
d1 = self.tag_cache[node_id]
d2 = self.tag_cache_pending.get(node_id, {})
return dict(d1, **d2)
def external_ip(self, node_id):
node = self._get_cached_node(node_id)
if node.public_ip_address is None:
node = self._get_node(node_id)
return node.public_ip_address
def internal_ip(self, node_id):
node = self._get_cached_node(node_id)
if node.private_ip_address is None:
node = self._get_node(node_id)
return node.private_ip_address
def set_node_tags(self, node_id, tags):
is_batching_thread = False
with self.tag_cache_lock:
if not self.tag_cache_pending:
is_batching_thread = True
# Wait for threads in the last batch to exit
self.ready_for_new_batch.wait()
self.ready_for_new_batch.clear()
self.batch_update_done.clear()
self.tag_cache_pending[node_id].update(tags)
if is_batching_thread:
time.sleep(TAG_BATCH_DELAY)
with self.tag_cache_lock:
self._update_node_tags()
self.batch_update_done.set()
with self.count_lock:
self.batch_thread_count += 1
self.batch_update_done.wait()
with self.count_lock:
self.batch_thread_count -= 1
if self.batch_thread_count == 0:
self.ready_for_new_batch.set()
def _update_node_tags(self):
batch_updates = defaultdict(list)
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id].update(tags)
self.tag_cache_pending = defaultdict(dict)
self._create_tags(batch_updates)
def _create_tags(self, batch_updates):
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AWSNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.ec2.meta.client.create_tags(
Resources=node_ids,
Tags=[{"Key": k, "Value": v}],
)
def create_node(self, node_config, tags, count) -> Dict[str, Any]:
"""Creates instances.
Returns dict mapping instance id to ec2.Instance object for the created
instances.
"""
# sort tags by key to support deterministic unit test stubbing
tags = OrderedDict(sorted(copy.deepcopy(tags).items()))
reused_nodes_dict = {}
# Try to reuse previously stopped nodes with compatible configs
if self.cache_stopped_nodes:
# TODO(ekl) this is breaking the abstraction boundary a little by
# peeking into the tag set.
filters = [
{
"Name": "instance-state-name",
"Values": ["stopped", "stopping"],
},
{
"Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
"Values": [self.cluster_name],
},
{
"Name": "tag:{}".format(TAG_RAY_NODE_KIND),
"Values": [tags[TAG_RAY_NODE_KIND]],
},
{
"Name": "tag:{}".format(TAG_RAY_LAUNCH_CONFIG),
"Values": [tags[TAG_RAY_LAUNCH_CONFIG]],
},
]
# This tag may not always be present.
if TAG_RAY_USER_NODE_TYPE in tags:
filters.append(
{
"Name": "tag:{}".format(TAG_RAY_USER_NODE_TYPE),
"Values": [tags[TAG_RAY_USER_NODE_TYPE]],
}
)
with self._reuse_node_lock:
reuse_nodes = list(self.ec2.instances.filter(Filters=filters))[:count]
reuse_node_ids = [n.id for n in reuse_nodes]
reused_nodes_dict = {n.id: n for n in reuse_nodes}
if reuse_nodes:
cli_logger.print(
# todo: handle plural vs singular?
"Reusing nodes {}. "
"To disable reuse, set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration.",
cli_logger.render_list(reuse_node_ids),
)
# todo: timed?
with cli_logger.group("Stopping instances to reuse"):
for node in reuse_nodes:
self.tag_cache[node.id] = from_aws_format(
{x["Key"]: x["Value"] for x in node.tags}
)
if node.state["Name"] == "stopping":
cli_logger.print(
"Waiting for instance {} to stop", node.id
)
node.wait_until_stopped()
self.ec2.meta.client.start_instances(InstanceIds=reuse_node_ids)
for node_id in reuse_node_ids:
self.set_node_tags(node_id, tags)
count -= len(reuse_node_ids)
created_nodes_dict = {}
if count:
created_nodes_dict = self._create_node(node_config, tags, count)
all_created_nodes = reused_nodes_dict
all_created_nodes.update(created_nodes_dict)
return all_created_nodes
@staticmethod
def _merge_tag_specs(
tag_specs: List[Dict[str, Any]], user_tag_specs: List[Dict[str, Any]]
) -> None:
"""
Merges user-provided node config tag specifications into a base
list of node provider tag specifications. The base list of
node provider tag specs is modified in-place.
This allows users to add tags and override values of existing
tags with their own, and only applies to the resource type
"instance". All other resource types are appended to the list of
tag specs.
Args:
tag_specs (List[Dict[str, Any]]): base node provider tag specs
user_tag_specs (List[Dict[str, Any]]): user's node config tag specs
"""
for user_tag_spec in user_tag_specs:
if user_tag_spec["ResourceType"] == "instance":
for user_tag in user_tag_spec["Tags"]:
exists = False
for tag in tag_specs[0]["Tags"]:
if user_tag["Key"] == tag["Key"]:
exists = True
tag["Value"] = user_tag["Value"]
break
if not exists:
tag_specs[0]["Tags"] += [user_tag]
else:
tag_specs += [user_tag_spec]
def _create_node(self, node_config, tags, count):
created_nodes_dict = {}
tags = to_aws_format(tags)
conf = node_config.copy()
tag_pairs = [
{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
}
]
for k, v in tags.items():
tag_pairs.append(
{
"Key": k,
"Value": v,
}
)
if CloudwatchHelper.cloudwatch_config_exists(self.provider_config, "agent"):
cwa_installed = self._check_ami_cwa_installation(node_config)
if cwa_installed:
tag_pairs.extend(
[
{
"Key": CLOUDWATCH_AGENT_INSTALLED_TAG,
"Value": "True",
}
]
)
tag_specs = [
{
"ResourceType": "instance",
"Tags": tag_pairs,
}
]
user_tag_specs = conf.get("TagSpecifications", [])
AWSNodeProvider._merge_tag_specs(tag_specs, user_tag_specs)
# SubnetIds is not a real config key: we must resolve to a
# single SubnetId before invoking the AWS API.
subnet_ids = conf.pop("SubnetIds")
# update config with min/max node counts and tag specs
conf.update({"MinCount": 1, "MaxCount": count, "TagSpecifications": tag_specs})
# Try to always launch in the first listed subnet.
subnet_idx = 0
cli_logger_tags = {}
# NOTE: This ensures that we try ALL availability zones before
# throwing an error.
max_tries = max(BOTO_CREATE_MAX_RETRIES, len(subnet_ids))
for attempt in range(1, max_tries + 1):
try:
if "NetworkInterfaces" in conf:
net_ifs = conf["NetworkInterfaces"]
# remove security group IDs previously copied from network
# interfaces (create_instances call fails otherwise)
conf.pop("SecurityGroupIds", None)
cli_logger_tags["network_interfaces"] = str(net_ifs)
else:
subnet_id = subnet_ids[subnet_idx % len(subnet_ids)]
conf["SubnetId"] = subnet_id
cli_logger_tags["subnet_id"] = subnet_id
created = self.ec2_fail_fast.create_instances(**conf)
created_nodes_dict = {n.id: n for n in created}
# todo: timed?
# todo: handle plurality?
with cli_logger.group(
"Launched {} nodes", count, _tags=cli_logger_tags
):
for instance in created:
# NOTE(maximsmol): This is needed for mocking
# boto3 for tests. This is likely a bug in moto
# but AWS docs don't seem to say.
# You can patch moto/ec2/responses/instances.py
# to fix this (add <stateReason> to EC2_RUN_INSTANCES)
# The correct value is technically
# {"code": "0", "Message": "pending"}
state_reason = "pending"
if instance.state_reason:
state_reason = (
instance.state_reason["Message"] or state_reason
)
cli_logger.print(
"Launched instance {}",
instance.instance_id,
_tags=dict(
state=instance.state["Name"],
info=state_reason,
),
)
break
except botocore.exceptions.ClientError as exc:
# Launch failure may be due to instance type availability in
# the given AZ
subnet_idx += 1
if attempt == max_tries:
try:
exc = NodeLaunchException(
category=exc.response["Error"]["Code"],
description=exc.response["Error"]["Message"],
src_exc_info=sys.exc_info(),
)
except Exception:
# In theory, all ClientError's we expect to get should
# have these fields, but just in case we can't parse
# it, it's fine, just throw the original error.
logger.warning("Couldn't parse exception.", exc)
pass
cli_logger.abort(
"Failed to launch instances. Max attempts exceeded.",
exc=exc,
)
else:
cli_logger.warning(
"create_instances: Attempt failed with {}, retrying.", exc
)
return created_nodes_dict
    def terminate_node(self, node_id):
        """Terminate (or stop) a single node.

        With ``cache_stopped_nodes`` enabled, on-demand instances are only
        stopped so they can be reused later; spot instances cannot be stopped,
        so they are always terminated.
        """
        node = self._get_cached_node(node_id)
        if self.cache_stopped_nodes:
            if node.spot_instance_request_id:
                # Spot instances do not support the stop operation.
                cli_logger.print(
                    "Terminating instance {} "
                    + cf.dimmed("(cannot stop spot instances, only terminate)"),
                    node_id,
                )  # todo: show node name?
                node.terminate()
            else:
                cli_logger.print(
                    "Stopping instance {} "
                    + cf.dimmed(
                        "(to terminate instead, "
                        "set `cache_stopped_nodes: False` "
                        "under `provider` in the cluster configuration)"
                    ),
                    node_id,
                )  # todo: show node name?
                node.stop()
        else:
            node.terminate()

        # TODO (Alex): We are leaking the tag cache here. Naively, we would
        # want to just remove the cache entry here, but terminating can be
        # asynchronous or error, which would result in a use after free error.
        # If this leak becomes bad, we can garbage collect the tag cache when
        # the node cache is updated.
def _check_ami_cwa_installation(self, config):
response = self.ec2.meta.client.describe_images(ImageIds=[config["ImageId"]])
cwa_installed = False
images = response.get("Images")
if images:
assert len(images) == 1, (
f"Expected to find only 1 AMI with the given ID, "
f"but found {len(images)}."
)
image_name = images[0].get("Name", "")
if CLOUDWATCH_AGENT_INSTALLED_AMI_TAG in image_name:
cwa_installed = True
return cwa_installed
    def terminate_nodes(self, node_ids):
        """Terminate (or stop) a batch of nodes.

        Mirrors ``terminate_node``: with ``cache_stopped_nodes`` enabled,
        on-demand instances are stopped for later reuse while spot instances
        (which cannot be stopped) are terminated. API calls are issued in
        batches of at most ``max_terminate_nodes`` ids.
        """
        if not node_ids:
            return
        terminate_instances_func = self.ec2.meta.client.terminate_instances
        stop_instances_func = self.ec2.meta.client.stop_instances

        # In some cases, this function stops some nodes, but terminates others.
        # Each of these requires a different EC2 API call. So, we use the
        # "nodes_to_terminate" dict below to keep track of exactly which API
        # call will be used to stop/terminate which set of nodes. The key is
        # the function to use, and the value is the list of nodes to terminate
        # with that function.
        nodes_to_terminate = {terminate_instances_func: [], stop_instances_func: []}

        if self.cache_stopped_nodes:
            spot_ids = []
            on_demand_ids = []

            for node_id in node_ids:
                if self._get_cached_node(node_id).spot_instance_request_id:
                    spot_ids += [node_id]
                else:
                    on_demand_ids += [node_id]

            if on_demand_ids:
                # todo: show node names?
                cli_logger.print(
                    "Stopping instances {} "
                    + cf.dimmed(
                        "(to terminate instead, "
                        "set `cache_stopped_nodes: False` "
                        "under `provider` in the cluster configuration)"
                    ),
                    cli_logger.render_list(on_demand_ids),
                )
            if spot_ids:
                cli_logger.print(
                    "Terminating instances {} "
                    + cf.dimmed("(cannot stop spot instances, only terminate)"),
                    cli_logger.render_list(spot_ids),
                )

            nodes_to_terminate[stop_instances_func] = on_demand_ids
            nodes_to_terminate[terminate_instances_func] = spot_ids
        else:
            nodes_to_terminate[terminate_instances_func] = node_ids

        # Batch size per API call; defaults to "all at once" when no explicit
        # limit is configured.
        max_terminate_nodes = (
            self.max_terminate_nodes
            if self.max_terminate_nodes is not None
            else len(node_ids)
        )

        for terminate_func, nodes in nodes_to_terminate.items():
            for start in range(0, len(nodes), max_terminate_nodes):
                terminate_func(InstanceIds=nodes[start : start + max_terminate_nodes])
    def _get_node(self, node_id):
        """Refresh and get info for this node, updating the cache."""
        self.non_terminated_nodes({})  # Side effect: updates cache

        if node_id in self.cached_nodes:
            return self.cached_nodes[node_id]

        # Node not in {pending, running} -- retry with a point query. This
        # usually means the node was recently preempted or terminated.
        # The EC2 API is eventually consistent. This means that an instance
        # might not be immediately visible. So we need to retry the query a few times.
        # See: https://docs.aws.amazon.com/ec2/latest/devguide/eventual-consistency.html
        # and https://github.com/ray-project/ray/issues/51861
        for attempts in range(max(BOTO_MAX_RETRIES, 1)):  # at least try once.
            matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
            if len(matches) == 1:
                return matches[0]
            cli_logger.warning(
                "Attempt to fetch EC2 instances that have instance ID {}. Got {} matching EC2 instances. Will retry after {} second. This is retry number {}, and the maximum number of retries is {}.",
                node_id,
                len(matches),
                LIST_RETRY_DELAY_SEC,
                attempts + 1,
                BOTO_MAX_RETRIES,
            )
            # NOTE(review): this sleeps even after the final attempt, just
            # before raising below.
            time.sleep(LIST_RETRY_DELAY_SEC)
        raise AssertionError("Invalid instance id {}".format(node_id))
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
    @staticmethod
    def bootstrap_config(cluster_config):
        """Fill out and validate the AWS-specific parts of the cluster config."""
        return bootstrap_aws(cluster_config)
    @staticmethod
    def fillout_available_node_types_resources(
        cluster_config: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Fills out missing "resources" field for available_node_types.

        For every node type, looks up its EC2 instance type and autodetects
        CPU, (worker-only) memory, and accelerator resources unless the user
        specified them explicitly. Returns a modified deep copy of the input;
        raises ValueError for instance types unavailable in the region.
        """
        if "available_node_types" not in cluster_config:
            return cluster_config
        cluster_config = copy.deepcopy(cluster_config)

        instances_list = list_ec2_instances(
            cluster_config["provider"]["region"],
            cluster_config["provider"].get("aws_credentials"),
        )
        instances_dict = {
            instance["InstanceType"]: instance for instance in instances_list
        }
        available_node_types = cluster_config["available_node_types"]
        head_node_type = cluster_config["head_node_type"]
        for node_type in available_node_types:
            instance_type = available_node_types[node_type]["node_config"][
                "InstanceType"
            ]
            if instance_type in instances_dict:
                cpus = instances_dict[instance_type]["VCpuInfo"]["DefaultVCpus"]
                autodetected_resources = {"CPU": cpus}
                if node_type != head_node_type:
                    # we only autodetect worker node type memory resource
                    memory_total = instances_dict[instance_type]["MemoryInfo"][
                        "SizeInMiB"
                    ]
                    memory_total = int(memory_total) * 1024 * 1024
                    # Reserve the object-store fraction out of total memory.
                    prop = 1 - ray_constants.DEFAULT_OBJECT_STORE_MEMORY_PROPORTION
                    memory_resources = int(memory_total * prop)
                    autodetected_resources["memory"] = memory_resources

                # Ask every registered accelerator manager (GPU/TPU/...) what
                # this instance type provides.
                for (
                    accelerator_manager
                ) in ray._private.accelerators.get_all_accelerator_managers():
                    num_accelerators = (
                        accelerator_manager.get_ec2_instance_num_accelerators(
                            instance_type, instances_dict
                        )
                    )
                    accelerator_type = (
                        accelerator_manager.get_ec2_instance_accelerator_type(
                            instance_type, instances_dict
                        )
                    )
                    if num_accelerators:
                        autodetected_resources[
                            accelerator_manager.get_resource_name()
                        ] = num_accelerators
                        if accelerator_type:
                            autodetected_resources[
                                f"accelerator_type:{accelerator_type}"
                            ] = 1

                # User-specified resources take precedence over autodetection.
                autodetected_resources.update(
                    available_node_types[node_type].get("resources", {})
                )
                if autodetected_resources != available_node_types[node_type].get(
                    "resources", {}
                ):
                    available_node_types[node_type][
                        "resources"
                    ] = autodetected_resources
                    logger.debug(
                        "Updating the resources of {} to {}.".format(
                            node_type, autodetected_resources
                        )
                    )
            else:
                raise ValueError(
                    "Instance type "
                    + instance_type
                    + " is not available in AWS region: "
                    + cluster_config["provider"]["region"]
                    + "."
                )
        return cluster_config
| AWSNodeProvider |
python | pytorch__pytorch | torch/_inductor/mkldnn_ir.py | {
"start": 11315,
"end": 12878
class ConvolutionUnary(ExternKernelAlloc):
    """IR node for an MKLDNN convolution fused with a unary pointwise op.

    Lowers to ``torch.ops.mkldnn._convolution_pointwise``; for AOTInductor it
    emits the per-device C shim kernel. The class was declared with the
    placeholder name ``____`` while ``create`` instantiates ``ConvolutionUnary``
    (a NameError) — renamed to match.
    """

    def __init__(
        self,
        layout,
        inputs,
        constant_args=(),
    ) -> None:
        # Device comes from the first input (x); it selects both the C++ shim
        # kernel name and the extra header included at codegen time.
        self.device_type = get_device_type(inputs[0])
        super().__init__(
            layout,
            inputs,
            constant_args,
            None,
            op_overload=torch.ops.mkldnn._convolution_pointwise.default,
            cpp_kernel_name=f"aoti_torch_{self.device_type}_mkldnn__convolution_pointwise",
        )

    def codegen(self, wrapper):
        """Emit the call, pulling in the device-specific AOTI shim header."""
        wrapper.include_extra_header(
            f"torch/csrc/inductor/aoti_torch/c/shim_{self.device_type}.h"
        )
        super().codegen(wrapper)

    @classmethod
    def create(
        cls,
        x: "TensorBox",
        weight: "TensorBox",
        bias: "TensorBox",
        padding_: list[int],
        stride_: list[int],
        dilation_: list[int],
        groups: int,
        attr,
        scalars: Optional[list[Any]],
        algorithm,
    ):
        """Build a ConvolutionUnary node from lowering inputs.

        ``attr``/``scalars``/``algorithm`` describe the fused unary epilogue.
        """
        (
            inputs,
            constant_args,
            kernel_layout,
            _,
            _,
        ) = _prepare_convolution_fusion_create(
            cls, x, weight, bias, padding_, stride_, dilation_, groups
        )
        constant_args = constant_args + [
            attr,
            may_convert_to_optional(scalars),
            algorithm,
        ]
        packed = ConvolutionUnary(
            layout=kernel_layout,
            inputs=inputs,
            constant_args=constant_args,
        )
        return _create_output_node(packed)
| ConvolutionUnary |
python | scrapy__scrapy | tests/CrawlerProcess/twisted_reactor_select.py | {
"start": 58,
"end": 304
class SelectReactorSpider(scrapy.Spider):
    """Spider used to check CrawlerProcess honors the select-reactor setting.

    Declared with the placeholder name ``____`` while ``process.crawl`` below
    references ``SelectReactorSpider`` (a NameError) — renamed to match.
    NOTE(review): the spider ``name`` says "epoll_reactor" although the
    configured reactor is the select reactor — left as-is (runtime string).
    """

    name = "epoll_reactor"


process = CrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor",
    }
)
process.crawl(SelectReactorSpider)
process.start()
| SelectReactorSpider |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/chains/test_flare.py | {
"start": 331,
"end": 1665
class _EmptyRetriever(BaseRetriever):
    """Minimal no-op retriever used only for constructing FlareChain in tests.

    Declared with the placeholder name ``____`` while a test below instantiates
    ``_EmptyRetriever()`` (a NameError) — renamed to match.
    """

    def _get_relevant_documents(self, query: str) -> list[Document]:  # type: ignore[override]
        del query  # mark used
        return []

    async def _aget_relevant_documents(self, query: str) -> list[Document]:  # type: ignore[override]
        del query  # mark used
        return []
def test_from_llm_rejects_non_chatopenai() -> None:
    """FlareChain.from_llm must raise TypeError for LLMs that are not ChatOpenAI."""

    class Dummy:
        pass

    with pytest.raises(TypeError):
        FlareChain.from_llm(Dummy())  # type: ignore[arg-type]
@pytest.mark.requires("langchain_openai")
def test_from_llm_uses_supplied_chatopenai(monkeypatch: pytest.MonkeyPatch) -> None:
    """The exact ChatOpenAI instance passed in must end up in the chain (no copy)."""
    try:
        from langchain_openai import ChatOpenAI
    except ImportError:  # pragma: no cover
        pytest.skip("langchain-openai not installed")

    # Provide dummy API key to satisfy constructor env validation.
    monkeypatch.setenv("OPENAI_API_KEY", "TEST")
    supplied = ChatOpenAI(temperature=0.51, logprobs=True, max_completion_tokens=21)
    chain = FlareChain.from_llm(
        supplied,
        max_generation_len=32,
        retriever=_EmptyRetriever(),
    )
    # The second step of the question-generator sequence is the LLM itself.
    llm_in_chain = cast("RunnableSequence", chain.question_generator_chain).steps[1]
    assert llm_in_chain is supplied
| _EmptyRetriever |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 12602,
"end": 12886
class MarkdownH5(MarkdownHeader):
    """An H5 Markdown header.

    Renamed from the placeholder ``____``: the DEFAULT_CSS selector below is
    ``MarkdownH5``, which must match the widget class name to apply.
    """

    LEVEL = 5  # heading level rendered by this widget

    DEFAULT_CSS = """
    MarkdownH5 {
        color: $markdown-h5-color;
        background: $markdown-h5-background;
        text-style: $markdown-h5-text-style;

        margin: 1 0;
    }
    """
| MarkdownH5 |
python | openai__openai-python | src/openai/types/graders/score_model_grader_param.py | {
"start": 1787,
"end": 3073
class SamplingParams(TypedDict, total=False):
    """Sampling configuration for the score-model grader's model call.

    Renamed from the placeholder ``____`` to the conventional name used by the
    surrounding module.
    """

    max_completions_tokens: Optional[int]
    """The maximum number of tokens the grader model may generate in its response."""

    reasoning_effort: Optional[ReasoningEffort]
    """
    Constrains effort on reasoning for
    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
    supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
    reasoning effort can result in faster responses and fewer tokens used on
    reasoning in a response.

    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
      reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
      calls are supported for all reasoning values in gpt-5.1.
    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
      support `none`.
    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    """

    seed: Optional[int]
    """A seed value to initialize the randomness, during sampling."""

    temperature: Optional[float]
    """A higher temperature increases randomness in the outputs."""

    top_p: Optional[float]
    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
| SamplingParams |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 39699,
"end": 40221
class MutableCompositesUnpickleTest(_CompositeTestBase, fixtures.MappedTest):
    """Check mutable composites survive pickling on a mapped class with __eq__.

    Renamed from the placeholder ``____`` to the conventional test-class name.
    """

    @classmethod
    def setup_mappers(cls):
        foo = cls.tables.foo

        cls.Point = cls._type_fixture()

        cls.mapper_registry.map_imperatively(
            FooWithEq,
            foo,
            properties={"data": composite(cls.Point, foo.c.x, foo.c.y)},
        )

    def test_unpickle_modified_eq(self):
        # Unpickling must not blow up when the mapped class defines __eq__.
        u1 = FooWithEq(data=self.Point(3, 5))
        for loads, dumps in picklers():
            loads(dumps(u1))
| MutableCompositesUnpickleTest |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/clip_ranges_test.py | {
"start": 823,
"end": 1412
class ClipRangesBenchmark(op_bench.TorchBenchmarkBase):
    """Microbenchmark for the ``fb::clip_ranges`` operator.

    Renamed from the placeholder ``____``: ``generate_pt_test`` below registers
    the class under the name ``ClipRangesBenchmark``.
    """

    def init(self, LENGTH, M, N, MAX_LENGTH, device, dtype):
        # Random (LENGTH, M, N) tensor cast to the benchmarked dtype.
        self.inputs = {
            "input": torch.rand(LENGTH, M, N, device=device).type(dtype),
            "max_length": MAX_LENGTH,
        }
        self.set_module_name("clip_ranges")

    def forward(self, input, max_length: int):
        return torch.ops.fb.clip_ranges(input, max_length)


op_bench.generate_pt_test(
    clip_ranges_long_configs + clip_ranges_short_configs, ClipRangesBenchmark
)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()
| ClipRangesBenchmark |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py | {
"start": 11237,
"end": 12649
class DeVilliersGlasser01(Benchmark):
    r"""
    DeVilliers-Glasser 1 objective function.

    This class defines the DeVilliers-Glasser 1 [1]_ function global optimization
    problem. This is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{DeVilliersGlasser01}}(x) = \sum_{i=1}^{24} \left[ x_1x_2^{t_i}
        \sin(x_3t_i + x_4) - y_i \right ]^2

    Where, in this exercise, :math:`t_i = 0.1(i - 1)` and
    :math:`y_i = 60.137(1.371^{t_i}) \sin(3.112t_i + 1.761)`.

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [1, 100]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for
    :math:`x = [60.137, 1.371, 3.112, 1.761]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=4):
        # Renamed from the placeholder ``____`` to match the function name used
        # in the docstring formula above.
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([1.0] * self.N, [100.0] * self.N))

        self.global_optimum = [[60.137, 1.371, 3.112, 1.761]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Sampled curve y(t) that x parameterizes; residuals are squared.
        t = 0.1 * arange(24)
        y = 60.137 * (1.371 ** t) * sin(3.112 * t + 1.761)

        return sum((x[0] * (x[1] ** t) * sin(x[2] * t + x[3]) - y) ** 2.0)
| DeVilliersGlasser01 |
python | getsentry__sentry | src/sentry/api/bases/project.py | {
"start": 2168,
"end": 2306
class ProjectAndStaffPermission(StaffPermissionMixin, ProjectPermission):
    """Allows staff to access project endpoints.

    Renamed from the placeholder ``____``; the redundant ``pass`` after the
    docstring was removed (the docstring already forms the class body).
    """
| ProjectAndStaffPermission |
python | joke2k__faker | faker/providers/company/sl_SI/__init__.py | {
"start": 45,
"end": 255
class Provider(CompanyProvider):
    """Slovenian (sl_SI) company name provider.

    Renamed from the placeholder ``____`` to ``Provider``, the name Faker's
    locale loader expects for provider classes.
    """

    formats = (
        "{{last_name}} {{company_suffix}}",
        "{{first_name}} {{last_name}} s.p.",  # sole proprietor form
    )

    company_suffixes = (
        "d.o.o.",
        "d.d.",
    )
| Provider |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 2663,
"end": 7788
class TransformNode:
    """
    The base class for anything that participates in the transform tree
    and needs to invalidate its parents or be invalidated. This includes
    classes that are not really transforms, such as bounding boxes, since some
    transforms depend on bounding boxes to compute their values.

    (Renamed from the placeholder ``____``: ``__copy__`` below tests
    ``isinstance(val, TransformNode)``, which requires this name.)
    """

    # Invalidation may affect only the affine part. If the
    # invalidation was "affine-only", the _invalid member is set to
    # INVALID_AFFINE_ONLY

    # Possible values for the _invalid attribute.
    _VALID, _INVALID_AFFINE_ONLY, _INVALID_FULL = range(3)

    # Some metadata about the transform, used to determine whether an
    # invalidation is affine-only
    is_affine = False

    pass_through = False
    """
    If pass_through is True, all ancestors will always be
    invalidated, even if 'self' is already invalid.
    """

    def __init__(self, shorthand_name=None):
        """
        Parameters
        ----------
        shorthand_name : str
            A string representing the "name" of the transform. The name carries
            no significance other than to improve the readability of
            ``str(transform)`` when DEBUG=True.
        """
        # Maps id(parent) -> weakref to parent; weak so children don't keep
        # obsolete parents alive.
        self._parents = {}
        # Initially invalid, until first computation.
        self._invalid = self._INVALID_FULL
        self._shorthand_name = shorthand_name or ''

    if DEBUG:

        def __str__(self):
            # either just return the name of this TransformNode, or its repr
            return self._shorthand_name or repr(self)

    def __getstate__(self):
        # turn the dictionary with weak values into a normal dictionary
        return {**self.__dict__,
                '_parents': {k: v() for k, v in self._parents.items()}}

    def __setstate__(self, data_dict):
        self.__dict__ = data_dict
        # turn the normal dictionary back into a dictionary with weak values
        # The extra lambda is to provide a callback to remove dead
        # weakrefs from the dictionary when garbage collection is done.
        self._parents = {
            k: weakref.ref(v, lambda _, pop=self._parents.pop, k=k: pop(k))
            for k, v in self._parents.items() if v is not None}

    def __copy__(self):
        cls = type(self)
        other = cls.__new__(cls)
        other.__dict__.update(self.__dict__)
        # If `c = a + b; a1 = copy(a)`, then modifications to `a1` do not
        # propagate back to `c`, i.e. we need to clear the parents of `a1`.
        other._parents = {}
        # If `c = a + b; c1 = copy(c)`, then modifications to `a` also need to
        # be propagated to `c1`.
        for key, val in vars(self).items():
            if isinstance(val, TransformNode) and id(self) in val._parents:
                other.set_children(val)  # val == getattr(other, key)
        return other

    def invalidate(self):
        """
        Invalidate this `TransformNode` and triggers an invalidation of its
        ancestors. Should be called any time the transform changes.
        """
        return self._invalidate_internal(
            level=self._INVALID_AFFINE_ONLY if self.is_affine else self._INVALID_FULL,
            invalidating_node=self)

    def _invalidate_internal(self, level, invalidating_node):
        """
        Called by :meth:`invalidate` and subsequently ascends the transform
        stack calling each TransformNode's _invalidate_internal method.
        """
        # If we are already more invalid than the currently propagated invalidation,
        # then we don't need to do anything.
        if level <= self._invalid and not self.pass_through:
            return
        self._invalid = level
        for parent in list(self._parents.values()):
            parent = parent()  # Dereference the weak reference.
            if parent is not None:
                parent._invalidate_internal(level=level, invalidating_node=self)

    def set_children(self, *children):
        """
        Set the children of the transform, to let the invalidation
        system know which transforms can invalidate this transform.
        Should be called from the constructor of any transforms that
        depend on other transforms.
        """
        # Parents are stored as weak references, so that if the
        # parents are destroyed, references from the children won't
        # keep them alive.
        id_self = id(self)
        for child in children:
            # Use weak references so this dictionary won't keep obsolete nodes
            # alive; the callback deletes the dictionary entry. This is a
            # performance improvement over using WeakValueDictionary.
            ref = weakref.ref(
                self, lambda _, pop=child._parents.pop, k=id_self: pop(k))
            child._parents[id_self] = ref

    def frozen(self):
        """
        Return a frozen copy of this transform node. The frozen copy will not
        be updated when its children change. Useful for storing a previously
        known state of a transform where ``copy.deepcopy()`` might normally be
        used.
        """
        return self
| TransformNode |
python | skorch-dev__skorch | examples/accelerate-multigpu/run-no-skorch.py | {
"start": 491,
"end": 2644
class Net(BaseEstimator):
    """Bare-bones sklearn-style estimator training a torch module via accelerate.

    Renamed from the placeholder ``____``: ``main`` below instantiates
    ``Net(module, accelerator)``.
    """

    def __init__(self, module, accelerator):
        self.module = module
        self.accelerator = accelerator

    def fit(self, X, y, **fit_params):
        """Train for 5 epochs with SGD and NLL loss; returns self."""
        X = torch.as_tensor(X)
        y = torch.as_tensor(y)
        dataset = torch.utils.data.TensorDataset(X, y)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=64)
        optimizer = torch.optim.SGD(self.module.parameters(), lr=0.01)
        # accelerate wraps module/optimizer/dataloader for (multi-)device use.
        self.module = self.accelerator.prepare(self.module)
        optimizer = self.accelerator.prepare(optimizer)
        dataloader = self.accelerator.prepare(dataloader)

        # training
        self.module.train()
        for epoch in range(5):
            for source, targets in dataloader:
                optimizer.zero_grad()
                output = self.module(source)
                loss = nn.functional.nll_loss(output, targets)
                self.accelerator.backward(loss)
                optimizer.step()
        return self

    def predict_proba(self, X):
        """Return stacked model outputs for X, gathered across processes."""
        self.module.eval()
        X = torch.as_tensor(X)
        dataset = torch.utils.data.TensorDataset(X)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=64)
        dataloader = self.accelerator.prepare(dataloader)
        probas = []
        with torch.no_grad():
            for source, *_ in dataloader:
                output = self.module(source)
                # gather results from all processes before moving to CPU
                output = self.accelerator.gather_for_metrics(output)
                output = output.cpu().detach().numpy()
                probas.append(output)
        return np.vstack(probas)

    def predict(self, X):
        """Return the argmax class per sample."""
        y_proba = self.predict_proba(X)
        return y_proba.argmax(1)
def main():
    """Cross-validate the accelerate-backed Net on a synthetic dataset."""
    # Synthetic binary classification task; float32 for torch compatibility.
    X, y = make_classification(10000, n_features=100, n_informative=50, random_state=0)
    X = X.astype(np.float32)

    module = MyModule()
    accelerator = Accelerator()
    net = Net(module, accelerator)
    # cross_validate creates a deepcopy of the accelerator attribute
    res = cross_validate(
        net, X, y, cv=2, scoring='accuracy', verbose=3, error_score='raise',
    )
    print(res)


if __name__ == '__main__':
    main()
| Net |
python | pypa__virtualenv | src/virtualenv/seed/wheels/periodic_update.py | {
"start": 5708,
"end": 15586
class UpdateLog:
    """Record of periodic/manual wheel-update runs persisted in app data.

    Renamed from the placeholder ``____``: ``_run_do_update`` below calls
    ``UpdateLog.from_dict`` and ``__all__`` exports ``"UpdateLog"``.
    """

    def __init__(self, started, completed, versions, periodic) -> None:
        # started/completed: aware datetimes (or None) bounding the last run.
        self.started = started
        self.completed = completed
        self.versions = versions
        self.periodic = periodic

    @classmethod
    def from_dict(cls, dictionary):
        """Rebuild an UpdateLog from its JSON-dict form (None -> empty log)."""
        if dictionary is None:
            dictionary = {}
        return cls(
            load_datetime(dictionary.get("started")),
            load_datetime(dictionary.get("completed")),
            [NewVersion.from_dict(v) for v in dictionary.get("versions", [])],
            dictionary.get("periodic"),
        )

    @classmethod
    def from_app_data(cls, app_data, distribution, for_py_version):
        """Load the persisted update log for a distribution/python pair."""
        raw_json = app_data.embed_update_log(distribution, for_py_version).read()
        return cls.from_dict(raw_json)

    def to_dict(self):
        """Serialize to a JSON-compatible dict (inverse of from_dict)."""
        return {
            "started": dump_datetime(self.started),
            "completed": dump_datetime(self.completed),
            "periodic": self.periodic,
            "versions": [r.to_dict() for r in self.versions],
        }

    @property
    def needs_update(self):
        """True when a new update run should be started now."""
        now = datetime.now(tz=timezone.utc)
        if self.completed is None:  # never completed
            return self._check_start(now)
        if now - self.completed <= UPDATE_PERIOD:
            return False
        return self._check_start(now)

    def _check_start(self, now):
        # A run never started, or one that started long ago (presumed
        # aborted), may be (re)started.
        return self.started is None or now - self.started > UPDATE_ABORTED_DELAY
def trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, env, periodic):  # noqa: PLR0913
    """Spawn a detached child process that runs ``do_update`` in the background."""
    wheel_path = None if wheel is None else str(wheel.path)
    # Build a self-contained -c script so the child needs no arguments.
    cmd = [
        sys.executable,
        "-c",
        dedent(
            """
            from virtualenv.report import setup_report, MAX_LEVEL
            from virtualenv.seed.wheels.periodic_update import do_update
            setup_report(MAX_LEVEL, show_pid=True)
            do_update({!r}, {!r}, {!r}, {!r}, {!r}, {!r})
            """,
        )
        .strip()
        .format(distribution, for_py_version, wheel_path, str(app_data), [str(p) for p in search_dirs], periodic),
    ]
    # Escape hatch for tests: run the update inline instead of detached.
    debug = env.get("_VIRTUALENV_PERIODIC_UPDATE_INLINE") == "1"
    pipe = None if debug else DEVNULL
    kwargs = {"stdout": pipe, "stderr": pipe}
    if not debug and sys.platform == "win32":
        kwargs["creationflags"] = CREATE_NO_WINDOW
    process = Popen(cmd, **kwargs)
    LOGGER.info(
        "triggered periodic upgrade of %s%s (for python %s) via background process having PID %d",
        distribution,
        "" if wheel is None else f"=={wheel.version}",
        for_py_version,
        process.pid,
    )
    if debug:
        process.communicate()  # on purpose not called to make it a background process
    else:
        # set the returncode here -> no ResourceWarning on main process exit if the subprocess still runs
        process.returncode = 0
def do_update(distribution, for_py_version, embed_filename, app_data, search_dirs, periodic):  # noqa: PLR0913
    """Run a wheel update, always logging the outcome (even on failure)."""
    new_versions = None
    try:
        new_versions = _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs)
        return new_versions
    finally:
        LOGGER.debug("done %s %s with %s", distribution, for_py_version, new_versions)
def _run_do_update(  # noqa: C901, PLR0913
    app_data,
    distribution,
    embed_filename,
    for_py_version,
    periodic,
    search_dirs,
):
    """Download newer wheels for a distribution and persist the update log.

    Returns the list of newly discovered NewVersion entries (possibly empty).
    """
    from virtualenv.seed.wheels import acquire  # noqa: PLC0415

    wheel_filename = None if embed_filename is None else Path(embed_filename)
    embed_version = None if wheel_filename is None else Wheel(wheel_filename).version_tuple
    # Accept either string paths or already-constructed objects.
    app_data = AppDataDiskFolder(app_data) if isinstance(app_data, str) else app_data
    search_dirs = [Path(p) if isinstance(p, str) else p for p in search_dirs]
    wheelhouse = app_data.house
    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    now = datetime.now(tz=timezone.utc)

    # Split previously recorded versions: ones we manage (periodic/manual)
    # versus the rest (e.g. download-sourced).
    update_versions, other_versions = [], []
    for version in u_log.versions:
        if version.source in {"periodic", "manual"}:
            update_versions.append(version)
        else:
            other_versions.append(version)

    if periodic:
        source = "periodic"
    else:
        source = "manual"
        # mark the most recent one as source "manual"
        if update_versions:
            update_versions[0].source = source

    if wheel_filename is not None:
        dest = wheelhouse / wheel_filename.name
        if not dest.exists():
            copy2(str(wheel_filename), str(wheelhouse))
    # Walk releases newest-first until one is usable, already known, or we
    # reach the embedded version.
    last, last_version, versions, filenames = None, None, [], set()
    while last is None or not last.use(now, ignore_grace_period_ci=True):
        download_time = datetime.now(tz=timezone.utc)
        dest = acquire.download_wheel(
            distribution=distribution,
            version_spec=None if last_version is None else f"<{last_version}",
            for_py_version=for_py_version,
            search_dirs=search_dirs,
            app_data=app_data,
            to_folder=wheelhouse,
            env=os.environ,
        )
        if dest is None or (update_versions and update_versions[0].filename == dest.name):
            break
        release_date = release_date_for_wheel_path(dest.path)
        last = NewVersion(filename=dest.path.name, release_date=release_date, found_date=download_time, source=source)
        LOGGER.info("detected %s in %s", last, datetime.now(tz=timezone.utc) - download_time)
        versions.append(last)
        filenames.add(last.filename)
        last_wheel = last.wheel
        last_version = last_wheel.version
        if embed_version is not None and embed_version >= last_wheel.version_tuple:
            break  # stop download if we reach the embed version
    u_log.periodic = periodic
    if not u_log.periodic:
        u_log.started = now
    # update other_versions by removing version we just found
    other_versions = [version for version in other_versions if version.filename not in filenames]
    u_log.versions = versions + update_versions + other_versions
    u_log.completed = datetime.now(tz=timezone.utc)
    embed_update_log.write(u_log.to_dict())
    return versions
def release_date_for_wheel_path(dest):
    """Return the PyPI upload datetime (UTC) for a wheel file, or None."""
    wheel = Wheel(dest)
    # the most accurate is to ask PyPi - e.g. https://pypi.org/pypi/pip/json,
    # see https://warehouse.pypa.io/api-reference/json/ for more details
    content = _pypi_get_distribution_info_cached(wheel.distribution)
    if content is not None:
        try:
            upload_time = content["releases"][wheel.version][0]["upload_time"]
            return datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
        except Exception as exception:  # noqa: BLE001
            # Best effort only: malformed/missing metadata degrades to None.
            LOGGER.error("could not load release date %s because %r", content, exception)  # noqa: TRY400
    return None
def _request_context():
yield None
# fallback to non verified HTTPS (the information we request is not sensitive, so fallback)
yield ssl._create_unverified_context() # noqa: S323, SLF001
# Process-lifetime memo of PyPI JSON metadata, keyed by distribution name.
_PYPI_CACHE = {}


def _pypi_get_distribution_info_cached(distribution):
    """Memoized wrapper around ``_pypi_get_distribution_info``."""
    try:
        return _PYPI_CACHE[distribution]
    except KeyError:
        info = _pypi_get_distribution_info(distribution)
        _PYPI_CACHE[distribution] = info
        return info
def _pypi_get_distribution_info(distribution):
    """Fetch the PyPI JSON metadata for a distribution; None on any failure."""
    content, url = None, f"https://pypi.org/pypi/{distribution}/json"
    try:
        # Try each SSL context in turn (verified first, then unverified).
        for context in _request_context():
            try:
                with urlopen(url, context=context) as file_handler:  # noqa: S310
                    content = json.load(file_handler)
                break
            except URLError as exception:
                LOGGER.error("failed to access %s because %r", url, exception)  # noqa: TRY400
    except Exception as exception:  # noqa: BLE001
        # Catch-all: metadata lookup is best effort and must never raise.
        LOGGER.error("failed to access %s because %r", url, exception)  # noqa: TRY400
    return content
def manual_upgrade(app_data, env):
    """Upgrade every bundled wheel for every supported Python, in parallel."""
    # One worker thread per (python version, distribution) pair.
    workers = [
        Thread(target=_run_manual_upgrade, args=(app_data, distribution, py_version, env))
        for py_version, distribution_to_package in BUNDLE_SUPPORT.items()
        for distribution in distribution_to_package
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def _run_manual_upgrade(app_data, distribution, for_py_version, env):
    """Upgrade one distribution for one Python version and log the outcome."""
    start = datetime.now(tz=timezone.utc)
    from .bundle import from_bundle  # noqa: PLC0415

    # Resolve the currently available wheel without triggering another update.
    current = from_bundle(
        distribution=distribution,
        version=None,
        for_py_version=for_py_version,
        search_dirs=[],
        app_data=app_data,
        do_periodic_update=False,
        env=env,
    )
    LOGGER.warning(
        "upgrade %s for python %s with current %s",
        distribution,
        for_py_version,
        "" if current is None else current.name,
    )
    versions = do_update(
        distribution=distribution,
        for_py_version=for_py_version,
        embed_filename=current.path,
        app_data=app_data,
        search_dirs=[],
        periodic=False,
    )

    args = [
        distribution,
        for_py_version,
        datetime.now(tz=timezone.utc) - start,
    ]
    if versions:
        args.append("\n".join(f"\t{v}" for v in versions))
    ver_update = "new entries found:\n%s" if versions else "no new versions found"
    msg = f"upgraded %s for python %s in %s {ver_update}"
    LOGGER.warning(msg, *args)
# Public API of the periodic-update module.
__all__ = [
    "NewVersion",
    "UpdateLog",
    "add_wheel_to_update_log",
    "do_update",
    "dump_datetime",
    "load_datetime",
    "manual_upgrade",
    "periodic_update",
    "release_date_for_wheel_path",
    "trigger_update",
]
python | pypa__hatch | tests/project/test_core.py | {
"start": 4604,
"end": 6246
class TestEnsureCWD:
    """Tests for Project.ensure_cwd across location/cwd relationships.

    Renamed from the placeholder ``____`` to the conventional name for a test
    class covering ``ensure_cwd``.
    """

    def test_location_is_file(self, temp_dir, mocker):
        """A file location resolves to its parent directory as cwd."""
        script_path = temp_dir / "script.py"
        script_path.touch()

        project = Project(script_path)
        project.find_project_root()

        with temp_dir.as_cwd():
            # If ensure_cwd tried to change directory, this patch would raise.
            mocker.patch("hatch.utils.fs.Path.as_cwd", side_effect=Exception)
            with project.ensure_cwd() as cwd:
                assert cwd == temp_dir

    def test_cwd_is_location(self, temp_dir, mocker):
        """No directory change is needed when cwd equals the project root."""
        project_file = temp_dir / "pyproject.toml"
        project_file.touch()

        project = Project(temp_dir)
        project.find_project_root()

        with temp_dir.as_cwd():
            mocker.patch("hatch.utils.fs.Path.as_cwd", side_effect=Exception)
            with project.ensure_cwd() as cwd:
                assert cwd == temp_dir

    def test_cwd_inside_location(self, temp_dir, mocker):
        """A cwd below the project root is preserved as-is."""
        project_file = temp_dir / "pyproject.toml"
        project_file.touch()

        project = Project(temp_dir)
        project.find_project_root()

        subdir = temp_dir / "subdir"
        subdir.mkdir()
        with subdir.as_cwd():
            mocker.patch("hatch.utils.fs.Path.as_cwd", side_effect=Exception)
            with project.ensure_cwd() as cwd:
                assert cwd == subdir

    def test_cwd_outside_location(self, temp_dir):
        """A cwd outside the project root switches into the project root."""
        subdir = temp_dir / "subdir"
        subdir.mkdir()
        project_file = subdir / "pyproject.toml"
        project_file.touch()

        project = Project(subdir)
        project.find_project_root()

        with temp_dir.as_cwd(), project.ensure_cwd() as cwd:
            assert cwd == subdir
| TestEnsureCWD |
python | pandas-dev__pandas | pandas/tests/test_sorting.py | {
"start": 6070,
"end": 11883
} | class ____:
def test_int64_overflow_outer_merge(self):
# #2690, combinatorial explosion
df1 = DataFrame(
np.random.default_rng(2).standard_normal((1000, 7)),
columns=list("ABCDEF") + ["G1"],
)
df2 = DataFrame(
np.random.default_rng(3).standard_normal((1000, 7)),
columns=list("ABCDEF") + ["G2"],
)
result = merge(df1, df2, how="outer")
assert len(result) == 2000
@pytest.mark.slow
def test_int64_overflow_check_sum_col(self, left_right):
left, right = left_right
out = merge(left, right, how="outer")
assert len(out) == len(left)
tm.assert_series_equal(out["left"], -out["right"], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
tm.assert_series_equal(out["left"], result, check_names=False)
assert result.name is None
@pytest.mark.slow
def test_int64_overflow_how_merge(self, left_right, join_type):
left, right = left_right
out = merge(left, right, how="outer")
out.sort_values(out.columns.tolist(), inplace=True)
tm.assert_frame_equal(out, merge(left, right, how=join_type, sort=True))
@pytest.mark.slow
def test_int64_overflow_sort_false_order(self, left_right):
left, right = left_right
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how="left", sort=False)
tm.assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how="left", sort=False)
tm.assert_frame_equal(right, out[right.columns.tolist()])
@pytest.mark.slow
def test_int64_overflow_one_to_many_none_match(self, join_type, sort):
# one-2-many/none match
how = join_type
low, high, n = -1 << 10, 1 << 10, 1 << 11
left = DataFrame(
np.random.default_rng(2).integers(low, high, (n, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(
np.random.default_rng(3).integers(low, high, (n // 2, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# add duplicates & overlap with left to the right frame
i = np.random.default_rng(4).choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left["left"] = np.random.default_rng(2).standard_normal(len(left))
right["right"] = np.random.default_rng(2).standard_normal(len(right))
# shuffle left & right frames
left = left.sample(
frac=1, ignore_index=True, random_state=np.random.default_rng(5)
)
right = right.sample(
frac=1, ignore_index=True, random_state=np.random.default_rng(6)
)
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list("ABCDEFG")).iterrows():
ldict[idx].append(row["left"])
for idx, row in right.set_index(list("ABCDEFG")).iterrows():
rdict[idx].append(row["right"])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(
k
+ (
lv,
rv,
)
)
for k, rval in rdict.items():
if k not in ldict:
vals.extend(
k
+ (
np.nan,
rv,
)
for rv in rval
)
out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])
out = out.sort_values(out.columns.to_list(), ignore_index=True)
jmask = {
"left": out["left"].notna(),
"right": out["right"].notna(),
"inner": out["left"].notna() & out["right"].notna(),
"outer": np.ones(len(out), dtype="bool"),
}
mask = jmask[how]
frame = out[mask].sort_values(out.columns.to_list(), ignore_index=True)
assert mask.all() ^ mask.any() or how == "outer"
res = merge(left, right, how=how, sort=sort)
if sort:
kcols = list("ABCDEFG")
tm.assert_frame_equal(
res[kcols], res[kcols].sort_values(kcols, kind="mergesort")
)
# as in GH9092 dtypes break with outer/right join
# 2021-12-18: dtype does not break anymore
tm.assert_frame_equal(
frame, res.sort_values(res.columns.to_list(), ignore_index=True)
)
@pytest.mark.parametrize(
"codes_list, shape",
[
[
[
np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64),
],
(4, 5, 6),
],
[
[
np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5),
],
(10000, 10000),
],
],
)
def test_decons(codes_list, shape):
group_index = get_group_index(codes_list, shape, sort=True, xnull=True)
codes_list2 = _decons_group_index(group_index, shape)
for a, b in zip(codes_list, codes_list2):
tm.assert_numpy_array_equal(a, b)
| TestMerge |
python | ansible__ansible | test/units/utils/test_serialization_profiles.py | {
"start": 7742,
"end": 8183
} | class ____:
parameters: _TestParameters
expected: _TestOutput
def __str__(self) -> str:
parts = [f'profile={self.parameters.profile_name}', f'value={self.parameters.value}']
if self.parameters.tags:
parts.append(f"tags={','.join(sorted(type(obj).__name__ for obj in self.parameters.tags))}")
if self.parameters.lazy:
parts.append('lazy')
return '; '.join(parts)
| _TestCase |
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 3023,
"end": 4047
} | class ____:
"""
As part of Numba's preprocessing step, incoming function arguments are
modified based on the associated type for that argument that was used
to JIT the kernel. However it only knows how to handle built in array
types natively. With string UDFs, the jitted type is string_view*,
which numba does not know how to handle.
This class converts string_view* to raw pointer arguments, which Numba
knows how to use.
See numba.cuda.compiler._prepare_args for details.
"""
def prepare_args(self, ty, val, **kwargs):
if isinstance(ty, types.CPointer) and isinstance(
ty.dtype, (StringView, UDFString, ManagedUDFString)
):
return types.uint64, val.ptr if isinstance(
val, rmm.pylibrmm.device_buffer.DeviceBuffer
) else val.get_ptr(mode="read")
else:
return ty, val
str_view_arg_handler = StrViewArgHandler()
# String functions
@cuda_decl_registry.register_global(len)
| StrViewArgHandler |
python | astropy__astropy | astropy/nddata/__init__.py | {
"start": 139,
"end": 838
} | class ____ related tools to manage n-dimensional array-based data (e.g.
CCD images, IFU Data, grid-based simulation data, ...). This is more than
just `numpy.ndarray` objects, because it provides metadata that cannot
be easily provided by a single array.
"""
from astropy import config as _config
from .bitmask import *
from .blocks import *
from .ccddata import *
from .compat import *
from .covariance import *
from .decorators import *
from .flag_collection import *
from .mixins.ndarithmetic import *
from .mixins.ndio import *
from .mixins.ndslicing import *
from .nddata import *
from .nddata_base import *
from .nddata_withmixins import *
from .nduncertainty import *
from .utils import *
| and |
python | squidfunk__mkdocs-material | material/plugins/meta/plugin.py | {
"start": 1605,
"end": 5067
} | class ____(BasePlugin[MetaConfig]):
# Construct metadata mapping
def on_files(self, files, *, config):
if not self.config.enabled:
return
# Initialize mapping
self.meta = {}
# Resolve and load meta files in docs directory
docs = os.path.relpath(config.docs_dir)
for file in files:
name = posixpath.basename(file.src_uri)
if not name == self.config.meta_file:
continue
# Exclude meta file from site directory - explicitly excluding the
# meta file allows the author to use a file name without '.' prefix
file.inclusion = InclusionLevel.EXCLUDED
# Open file and parse as YAML
with open(file.abs_src_path, encoding = "utf-8-sig") as f:
path = file.src_path
try:
self.meta[path] = load(f, SafeLoader)
# The meta file could not be loaded because of a syntax error,
# which we display to the author with a nice error message
except Exception as e:
raise PluginError(
f"Error reading meta file '{path}' in '{docs}':\n"
f"{e}"
)
# Set metadata for page, if applicable (run earlier)
@event_priority(50)
def on_page_markdown(self, markdown, *, page, config, files):
if not self.config.enabled:
return
# Start with a clean state, as we first need to apply all meta files
# that are relevant to the current page, and then merge the page meta
# on top of that to ensure that the page meta always takes precedence
# over meta files - see https://t.ly/kvCRn
meta = {}
# Merge matching meta files in level-order
strategy = Strategy.TYPESAFE_ADDITIVE
for path, defaults in self.meta.items():
if not page.file.src_path.startswith(os.path.dirname(path)):
continue
# Skip if meta file was already merged - this happens in case of
# blog posts, as they need to be merged when posts are constructed,
# which is why we need to keep track of which meta files are applied
# to what pages using the `__extends` key.
page.meta.setdefault("__extends", [])
if path in page.meta["__extends"]:
continue
# Try to merge metadata
try:
merge(meta, defaults, strategy = strategy)
page.meta["__extends"].append(path)
# Merging the metadata with the given strategy resulted in an error,
# which we display to the author with a nice error message
except Exception as e:
docs = os.path.relpath(config.docs_dir)
raise PluginError(
f"Error merging meta file '{path}' in '{docs}':\n"
f"{e}"
)
# Ensure page metadata is merged last, so the author can override any
# defaults from the meta files, or even remove them entirely
page.meta = merge(meta, page.meta, strategy = strategy)
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.meta")
| MetaPlugin |
python | pydantic__pydantic | pydantic/types.py | {
"start": 46258,
"end": 52004
} | class ____(_SecretBase[SecretType]):
"""A generic base class used for defining a field with sensitive information that you do not want to be visible in logging or tracebacks.
You may either directly parametrize `Secret` with a type, or subclass from `Secret` with a parametrized type. The benefit of subclassing
is that you can define a custom `_display` method, which will be used for `repr()` and `str()` methods. The examples below demonstrate both
ways of using `Secret` to create a new secret type.
1. Directly parametrizing `Secret` with a type:
```python
from pydantic import BaseModel, Secret
SecretBool = Secret[bool]
class Model(BaseModel):
secret_bool: SecretBool
m = Model(secret_bool=True)
print(m.model_dump())
#> {'secret_bool': Secret('**********')}
print(m.model_dump_json())
#> {"secret_bool":"**********"}
print(m.secret_bool.get_secret_value())
#> True
```
2. Subclassing from parametrized `Secret`:
```python
from datetime import date
from pydantic import BaseModel, Secret
class SecretDate(Secret[date]):
def _display(self) -> str:
return '****/**/**'
class Model(BaseModel):
secret_date: SecretDate
m = Model(secret_date=date(2022, 1, 1))
print(m.model_dump())
#> {'secret_date': SecretDate('****/**/**')}
print(m.model_dump_json())
#> {"secret_date":"****/**/**"}
print(m.secret_date.get_secret_value())
#> 2022-01-01
```
The value returned by the `_display` method will be used for `repr()` and `str()`.
You can enforce constraints on the underlying type through annotations:
For example:
```python
from typing import Annotated
from pydantic import BaseModel, Field, Secret, ValidationError
SecretPosInt = Secret[Annotated[int, Field(gt=0, strict=True)]]
class Model(BaseModel):
sensitive_int: SecretPosInt
m = Model(sensitive_int=42)
print(m.model_dump())
#> {'sensitive_int': Secret('**********')}
try:
m = Model(sensitive_int=-42) # (1)!
except ValidationError as exc_info:
print(exc_info.errors(include_url=False, include_input=False))
'''
[
{
'type': 'greater_than',
'loc': ('sensitive_int',),
'msg': 'Input should be greater than 0',
'ctx': {'gt': 0},
}
]
'''
try:
m = Model(sensitive_int='42') # (2)!
except ValidationError as exc_info:
print(exc_info.errors(include_url=False, include_input=False))
'''
[
{
'type': 'int_type',
'loc': ('sensitive_int',),
'msg': 'Input should be a valid integer',
}
]
'''
```
1. The input value is not greater than 0, so it raises a validation error.
2. The input value is not an integer, so it raises a validation error because the `SecretPosInt` type has strict mode enabled.
"""
def _display(self) -> str | bytes:
return '**********' if self.get_secret_value() else ''
@classmethod
def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
inner_type = None
# if origin_type is Secret, then cls is a GenericAlias, and we can extract the inner type directly
origin_type = get_origin(source)
if origin_type is not None:
inner_type = get_args(source)[0]
# otherwise, we need to get the inner type from the base class
else:
bases = getattr(cls, '__orig_bases__', getattr(cls, '__bases__', []))
for base in bases:
if get_origin(base) is Secret:
inner_type = get_args(base)[0]
if bases == [] or inner_type is None:
raise TypeError(
f"Can't get secret type from {cls.__name__}. "
'Please use Secret[<type>], or subclass from Secret[<type>] instead.'
)
inner_schema = handler.generate_schema(inner_type) # type: ignore
def validate_secret_value(value, handler) -> Secret[SecretType]:
if isinstance(value, Secret):
value = value.get_secret_value()
validated_inner = handler(value)
return cls(validated_inner)
return core_schema.json_or_python_schema(
python_schema=core_schema.no_info_wrap_validator_function(
validate_secret_value,
inner_schema,
),
json_schema=core_schema.no_info_after_validator_function(lambda x: cls(x), inner_schema),
serialization=core_schema.plain_serializer_function_ser_schema(
_serialize_secret,
info_arg=True,
when_used='always',
),
)
__pydantic_serializer__ = SchemaSerializer(
core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
_serialize_secret,
info_arg=True,
when_used='always',
)
)
)
def _secret_display(value: SecretType) -> str: # type: ignore
return '**********' if value else ''
def _serialize_secret_field(
value: _SecretField[SecretType], info: core_schema.SerializationInfo
) -> str | _SecretField[SecretType]:
if info.mode == 'json':
# we want the output to always be string without the `b'` prefix for bytes,
# hence we just use `secret_display`
return _secret_display(value.get_secret_value())
else:
return value
| Secret |
python | explosion__spaCy | spacy/lang/pl/lemmatizer.py | {
"start": 103,
"end": 3566
} | class ____(Lemmatizer):
# This lemmatizer implements lookup lemmatization based on the Morfeusz
# dictionary (morfeusz.sgjp.pl/en) by Institute of Computer Science PAS.
# It utilizes some prefix based improvements for verb and adjectives
# lemmatization, as well as case-sensitive lemmatization for nouns.
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
if mode == "pos_lookup":
# fmt: off
required = [
"lemma_lookup_adj", "lemma_lookup_adp", "lemma_lookup_adv",
"lemma_lookup_aux", "lemma_lookup_noun", "lemma_lookup_num",
"lemma_lookup_part", "lemma_lookup_pron", "lemma_lookup_verb"
]
# fmt: on
return (required, [])
else:
return super().get_lookups_config(mode)
def pos_lookup_lemmatize(self, token: Token) -> List[str]:
string = token.text
univ_pos = token.pos_
morphology = token.morph.to_dict()
lookup_pos = univ_pos.lower()
if univ_pos == "PROPN":
lookup_pos = "noun"
lookup_table = self.lookups.get_table("lemma_lookup_" + lookup_pos, {})
if univ_pos == "NOUN":
return self.lemmatize_noun(string, morphology, lookup_table)
if univ_pos != "PROPN":
string = string.lower()
if univ_pos == "ADJ":
return self.lemmatize_adj(string, morphology, lookup_table)
elif univ_pos == "VERB":
return self.lemmatize_verb(string, morphology, lookup_table)
return [lookup_table.get(string, string.lower())]
def lemmatize_adj(
self, string: str, morphology: dict, lookup_table: Dict[str, str]
) -> List[str]:
# this method utilizes different procedures for adjectives
# with 'nie' and 'naj' prefixes
if string[:3] == "nie":
search_string = string[3:]
if search_string[:3] == "naj":
naj_search_string = search_string[3:]
if naj_search_string in lookup_table:
return [lookup_table[naj_search_string]]
if search_string in lookup_table:
return [lookup_table[search_string]]
if string[:3] == "naj":
naj_search_string = string[3:]
if naj_search_string in lookup_table:
return [lookup_table[naj_search_string]]
return [lookup_table.get(string, string)]
def lemmatize_verb(
self, string: str, morphology: dict, lookup_table: Dict[str, str]
) -> List[str]:
# this method utilizes a different procedure for verbs
# with 'nie' prefix
if string[:3] == "nie":
search_string = string[3:]
if search_string in lookup_table:
return [lookup_table[search_string]]
return [lookup_table.get(string, string)]
def lemmatize_noun(
self, string: str, morphology: dict, lookup_table: Dict[str, str]
) -> List[str]:
# this method is case-sensitive, in order to work
# for incorrectly tagged proper names
if string != string.lower():
if string.lower() in lookup_table:
return [lookup_table[string.lower()]]
elif string in lookup_table:
return [lookup_table[string]]
return [string.lower()]
return [lookup_table.get(string, string)]
| PolishLemmatizer |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_dataproc_metastore.py | {
"start": 1897,
"end": 7703
} | class ____:
@pytest.mark.parametrize(
("requested_partitions", "result_files_with_rows", "expected_result"),
[
(None, [(RESULT_FILE_NAME_1, [])], False),
([None], [(RESULT_FILE_NAME_1, [])], False),
(None, [(RESULT_FILE_NAME_1, [ROW_1])], True),
([None], [(RESULT_FILE_NAME_1, [ROW_1])], True),
(None, [(RESULT_FILE_NAME_1, [ROW_1, ROW_2])], True),
([None], [(RESULT_FILE_NAME_1, [ROW_1, ROW_2])], True),
([PARTITION_1], [(RESULT_FILE_NAME_1, [])], False),
([PARTITION_1], [(RESULT_FILE_NAME_1, [ROW_1])], True),
([PARTITION_1, PARTITION_2], [(RESULT_FILE_NAME_1, [])], False),
([PARTITION_1, PARTITION_2], [(RESULT_FILE_NAME_1, [ROW_1])], False),
([PARTITION_1, PARTITION_2], [(RESULT_FILE_NAME_1, [ROW_1, ROW_2])], True),
([PARTITION_1, PARTITION_1], [(RESULT_FILE_NAME_1, [])], False),
([PARTITION_1, PARTITION_1], [(RESULT_FILE_NAME_1, [ROW_1])], True),
([PARTITION_1, PARTITION_2], [(RESULT_FILE_NAME_1, []), (RESULT_FILE_NAME_2, [])], False),
([PARTITION_1, PARTITION_2], [(RESULT_FILE_NAME_1, [ROW_1]), (RESULT_FILE_NAME_2, [])], False),
([PARTITION_1, PARTITION_2], [(RESULT_FILE_NAME_1, []), (RESULT_FILE_NAME_2, [ROW_2])], False),
(
[PARTITION_1, PARTITION_2],
[(RESULT_FILE_NAME_1, [ROW_1]), (RESULT_FILE_NAME_2, [ROW_2])],
True,
),
],
)
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("DataprocMetastoreHook"))
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("parse_json_from_gcs"))
def test_poke_positive_manifest(
self,
mock_parse_json_from_gcs,
mock_hook,
requested_partitions,
result_files_with_rows,
expected_result,
):
manifest = deepcopy(MANIFEST_SUCCESS)
parse_json_from_gcs_side_effect = []
for file_name, rows in result_files_with_rows:
manifest["filenames"].append(file_name)
file = deepcopy(RESULT_FILE_CONTENT)
file["rows"] = rows
parse_json_from_gcs_side_effect.append(file)
mock_parse_json_from_gcs.side_effect = [manifest, *parse_json_from_gcs_side_effect]
sensor = MetastoreHivePartitionSensor(
task_id=TEST_TASK_ID,
service_id=TEST_SERVICE_ID,
region=TEST_REGION,
table=TEST_TABLE,
partitions=requested_partitions,
gcp_conn_id=GCP_CONN_ID,
)
assert sensor.poke(context={}) == expected_result
@pytest.mark.parametrize("empty_manifest", [dict(), list(), tuple(), None, ""])
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("DataprocMetastoreHook"))
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("parse_json_from_gcs"))
def test_poke_empty_manifest(self, mock_parse_json_from_gcs, mock_hook, empty_manifest):
mock_parse_json_from_gcs.return_value = empty_manifest
sensor = MetastoreHivePartitionSensor(
task_id=TEST_TASK_ID,
service_id=TEST_SERVICE_ID,
region=TEST_REGION,
table=TEST_TABLE,
partitions=[PARTITION_1],
gcp_conn_id=GCP_CONN_ID,
)
with pytest.raises(AirflowException):
sensor.poke(context={})
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("DataprocMetastoreHook"))
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("parse_json_from_gcs"))
def test_poke_wrong_status(self, mock_parse_json_from_gcs, mock_hook):
error_message = "Test error message"
mock_parse_json_from_gcs.return_value = {"code": 1, "message": error_message}
sensor = MetastoreHivePartitionSensor(
task_id=TEST_TASK_ID,
service_id=TEST_SERVICE_ID,
region=TEST_REGION,
table=TEST_TABLE,
partitions=[PARTITION_1],
gcp_conn_id=GCP_CONN_ID,
)
with pytest.raises(AirflowException, match=f"Request failed: {error_message}"):
sensor.poke(context={})
@pytest.mark.parametrize(
("requested_partitions", "result_files_with_rows", "expected_result"),
[
([PARTITION_1, PARTITION_1], [(RESULT_FILE_NAME_1, [ROW_1])], True),
],
)
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("DataprocMetastoreHook"))
@mock.patch(DATAPROC_METASTORE_SENSOR_PATH.format("parse_json_from_gcs"))
def test_file_uri(
self,
mock_parse_json_from_gcs,
mock_hook,
requested_partitions,
result_files_with_rows,
expected_result,
):
mock_hook.return_value.wait_for_operation.return_value = mock.MagicMock(result_manifest_uri=TEST_URI)
manifest = deepcopy(MANIFEST_SUCCESS)
parse_json_from_gcs_side_effect = []
for file_name, rows in result_files_with_rows:
manifest["filenames"].append(file_name)
file = deepcopy(RESULT_FILE_CONTENT)
file["rows"] = rows
parse_json_from_gcs_side_effect.append(file)
mock_parse_json_from_gcs.side_effect = [manifest, *parse_json_from_gcs_side_effect]
sensor = MetastoreHivePartitionSensor(
task_id=TEST_TASK_ID,
service_id=TEST_SERVICE_ID,
region=TEST_REGION,
table=TEST_TABLE,
partitions=requested_partitions,
gcp_conn_id=GCP_CONN_ID,
)
assert sensor.poke(context={}) == expected_result
mock_parse_json_from_gcs.assert_called_with(
file_uri=TEST_URI + "/" + RESULT_FILE_NAME_1, gcp_conn_id=GCP_CONN_ID, impersonation_chain=None
)
| TestMetastoreHivePartitionSensor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.