language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Textualize__textual | tests/test_focus.py | {
"start": 380,
"end": 470
} | class ____(Widget, can_focus=False, can_focus_children=True):
pass
| ChildrenFocusableOnly |
python | walkccc__LeetCode | solutions/517. Super Washing Machines/517.py | {
"start": 0,
"end": 343
} | class ____:
def findMinMoves(self, machines: list[int]) -> int:
dresses = sum(machines)
if dresses % len(machines) != 0:
return -1
ans = 0
average = dresses // len(machines)
inout = 0
for dress in machines:
inout += dress - average
ans = max(ans, abs(inout), dress - average)
return ans
| Solution |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 9427,
"end": 9928
} | class ____(Node):
"""Represents a parameter of a function definition.
Attributes:
name: The name of the parameter.
type: The type of the parameter.
kind: The kind of parameter (e.g., ParameterKind.KWONLY).
optional: If the parameter is optional.
mutated_type: The type the parameter will have after the function is called
if the type is mutated, None otherwise.
"""
name: str
type: TypeU
kind: ParameterKind
optional: bool
mutated_type: TypeU | None
| Parameter |
python | pytorch__pytorch | torch/utils/checkpoint.py | {
"start": 56384,
"end": 58069
} | class ____(TorchDispatchMode):
@classmethod
def ignore_compile_internals(cls):
return True
# Used together with _CachedTorchDispatchMode to implement SAC.
def __init__(self, policy_fn, storage) -> None:
self.policy_fn = policy_fn
self.storage = storage
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if func in SAC_IGNORED_OPS:
return func(*args, **kwargs)
kwargs = {} if kwargs is None else kwargs
policy = self.policy_fn(SelectiveCheckpointContext(is_recompute=False),
func, *args, **kwargs)
if isinstance(policy, bool):
policy = _policy_from_bool(policy)
is_compiling = _is_compiling(func, args, kwargs)
if is_compiling:
# Overwrite each node's "recompute" tag to add in the user annotation.
fx_traceback.current_meta["recompute"] = policy
out = func(*args, **kwargs)
# HOPs don't support func._schema
# HOPs don't alias -> this is always true today and will be always true for a long time
# TODO HOPs don't mutate -> this is always true today but will not be true forever
if isinstance(func, torch._ops.HigherOrderOperator):
any_ret_has_alias_info = False
else:
any_ret_has_alias_info = any(ret.alias_info is not None for ret in func._schema.returns)
if policy in (CheckpointPolicy.MUST_SAVE, CheckpointPolicy.PREFER_SAVE) or is_compiling:
self.storage[func].append(tree_map(lambda x: _VersionWrapper(_maybe_detach(x, any_ret_has_alias_info)), out))
return out
| _CachingTorchDispatchMode |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 31002,
"end": 32517
} | class ____(IHaveNew):
"""Serializable data associated with an asset check."""
name: str
asset_key: AssetKey
description: Optional[str]
execution_set_identifier: Optional[str]
job_names: Sequence[str]
blocking: bool
additional_asset_keys: Sequence[AssetKey]
automation_condition: Optional[AutomationCondition]
automation_condition_snapshot: Optional[AutomationConditionSnapshot]
def __new__(
cls,
name: str,
asset_key: AssetKey,
description: Optional[str],
execution_set_identifier: Optional[str] = None,
job_names: Optional[Sequence[str]] = None,
blocking: bool = False,
additional_asset_keys: Optional[Sequence[AssetKey]] = None,
automation_condition: Optional[AutomationCondition] = None,
automation_condition_snapshot: Optional[AutomationConditionSnapshot] = None,
):
return super().__new__(
cls,
name=name,
asset_key=asset_key,
description=description,
execution_set_identifier=execution_set_identifier,
job_names=job_names or [],
blocking=blocking,
additional_asset_keys=additional_asset_keys or [],
automation_condition=automation_condition,
automation_condition_snapshot=automation_condition_snapshot,
)
@property
def key(self) -> AssetCheckKey:
return AssetCheckKey(asset_key=self.asset_key, name=self.name)
| AssetCheckNodeSnap |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 22844,
"end": 23568
} | class ____:
"""Test ko_KR company provider methods"""
def test_company_name_word(self, faker, num_samples):
for _ in range(num_samples):
word = faker.company_name_word()
assert isinstance(word, str)
assert word in KoKrCompanyProvider.company_name_words
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in KoKrCompanyProvider.company_suffixes
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
| TestKoKr |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_dag_command.py | {
"start": 2482,
"end": 40190
} | class ____:
parser: argparse.ArgumentParser
@classmethod
def setup_class(cls):
parse_and_sync_to_db(os.devnull, include_examples=True)
cls.parser = cli_parser.get_parser()
@classmethod
def teardown_class(cls) -> None:
clear_db_runs()
clear_db_dags()
def setup_method(self):
clear_db_runs()
clear_db_import_errors()
def teardown_method(self):
clear_db_import_errors()
def test_show_dag_dependencies_print(self, stdout_capture):
with stdout_capture as temp_stdout:
dag_command.dag_dependencies_show(self.parser.parse_args(["dags", "show-dependencies"]))
out = temp_stdout.getvalue()
assert "digraph" in out
assert "graph [rankdir=LR]" in out
@mock.patch("airflow.cli.commands.dag_command.render_dag_dependencies")
def test_show_dag_dependencies_save(self, mock_render_dag_dependencies, stdout_capture):
with stdout_capture as temp_stdout:
dag_command.dag_dependencies_show(
self.parser.parse_args(["dags", "show-dependencies", "--save", "output.png"])
)
out = temp_stdout.getvalue()
mock_render_dag_dependencies.return_value.render.assert_called_once_with(
cleanup=True, filename="output", format="png"
)
assert "File output.png saved" in out
def test_show_dag_print(self, stdout_capture):
with stdout_capture as temp_stdout:
dag_command.dag_show(self.parser.parse_args(["dags", "show", "example_bash_operator"]))
out = temp_stdout.getvalue()
assert "label=example_bash_operator" in out
assert "graph [label=example_bash_operator labelloc=t rankdir=LR]" in out
assert "runme_2 -> run_after_loop" in out
@mock.patch("airflow.cli.commands.dag_command.render_dag")
def test_show_dag_save(self, mock_render_dag, stdout_capture):
with stdout_capture as temp_stdout:
dag_command.dag_show(
self.parser.parse_args(["dags", "show", "example_bash_operator", "--save", "awesome.png"])
)
out = temp_stdout.getvalue()
mock_render_dag.return_value.render.assert_called_once_with(
cleanup=True, filename="awesome", format="png"
)
assert "File awesome.png saved" in out
@mock.patch("airflow.cli.commands.dag_command.subprocess.Popen")
@mock.patch("airflow.cli.commands.dag_command.render_dag")
def test_show_dag_imgcat(self, mock_render_dag, mock_popen, stdout_capture):
mock_render_dag.return_value.pipe.return_value = b"DOT_DATA"
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.communicate.return_value = (b"OUT", b"ERR")
mock_popen.return_value.__enter__.return_value = mock_proc
with stdout_capture as temp_stdout:
dag_command.dag_show(
self.parser.parse_args(["dags", "show", "example_bash_operator", "--imgcat"])
)
out = temp_stdout.getvalue()
mock_render_dag.return_value.pipe.assert_called_once_with(format="png")
mock_proc.communicate.assert_called_once_with(b"DOT_DATA")
assert "OUT" in out
assert "ERR" in out
def test_next_execution(self, tmp_path, stdout_capture):
dag_test_list = [
("future_schedule_daily", "timedelta(days=5)", "'0 0 * * *'", "True"),
("future_schedule_every_4_hours", "timedelta(days=5)", "timedelta(hours=4)", "True"),
("future_schedule_once", "timedelta(days=5)", "'@once'", "True"),
("future_schedule_none", "timedelta(days=5)", "None", "True"),
("past_schedule_once", "timedelta(days=-5)", "'@once'", "True"),
("past_schedule_daily", "timedelta(days=-5)", "'0 0 * * *'", "True"),
("past_schedule_daily_catchup_false", "timedelta(days=-5)", "'0 0 * * *'", "False"),
]
for f in dag_test_list:
file_content = os.linesep.join(
[
"from airflow import DAG",
"from airflow.providers.standard.operators.empty import EmptyOperator",
"from datetime import timedelta; from pendulum import today",
f"dag = DAG('{f[0]}', start_date=today() + {f[1]}, schedule={f[2]}, catchup={f[3]})",
"task = EmptyOperator(task_id='empty_task',dag=dag)",
]
)
dag_file = tmp_path / f"{f[0]}.py"
dag_file.write_text(file_content)
with time_machine.travel(DEFAULT_DATE):
clear_db_dags()
parse_and_sync_to_db(tmp_path, include_examples=False)
default_run = DEFAULT_DATE
future_run = default_run + timedelta(days=5)
past_run = default_run + timedelta(days=-5)
expected_output = {
"future_schedule_daily": (
future_run.isoformat(),
future_run.isoformat() + os.linesep + (future_run + timedelta(days=1)).isoformat(),
),
"future_schedule_every_4_hours": (
future_run.isoformat(),
future_run.isoformat() + os.linesep + (future_run + timedelta(hours=4)).isoformat(),
),
"future_schedule_once": (future_run.isoformat(), future_run.isoformat() + os.linesep + "None"),
"future_schedule_none": ("None", "None"),
"past_schedule_once": (past_run.isoformat(), "None"),
"past_schedule_daily": (
past_run.isoformat(),
past_run.isoformat() + os.linesep + (past_run + timedelta(days=1)).isoformat(),
),
"past_schedule_daily_catchup_false": (
(default_run - timedelta(days=1)).isoformat(),
(default_run - timedelta(days=1)).isoformat() + os.linesep + default_run.isoformat(),
),
}
for dag_id in expected_output:
# Test num-executions = 1 (default)
args = self.parser.parse_args(["dags", "next-execution", dag_id])
with stdout_capture as temp_stdout:
dag_command.dag_next_execution(args)
out = temp_stdout.getvalue()
assert expected_output[dag_id][0] in out
# Test num-executions = 2
args = self.parser.parse_args(["dags", "next-execution", dag_id, "--num-executions", "2"])
with stdout_capture as temp_stdout:
dag_command.dag_next_execution(args)
out = temp_stdout.getvalue()
assert expected_output[dag_id][1] in out
# Rebuild Test DB for other tests
clear_db_dags()
parse_and_sync_to_db(os.devnull, include_examples=True)
@conf_vars({("core", "load_examples"): "true"})
def test_cli_report(self, stdout_capture):
args = self.parser.parse_args(["dags", "report", "--output", "json"])
with stdout_capture as temp_stdout:
dag_command.dag_report(args)
out = temp_stdout.getvalue()
assert "airflow/example_dags/example_complex.py" in out
assert "example_complex" in out
@conf_vars({("core", "load_examples"): "true"})
def test_cli_get_dag_details(self, stdout_capture):
args = self.parser.parse_args(["dags", "details", "example_complex", "--output", "yaml"])
with stdout_capture as temp_stdout:
dag_command.dag_details(args)
out = temp_stdout.getvalue()
# Check if DAG Details field are present
for field in dag_command.DAG_DETAIL_FIELDS:
assert field in out
# Check if identifying values are present
dag_details_values = ["airflow", "airflow/example_dags/example_complex.py", "16", "example_complex"]
for value in dag_details_values:
assert value in out
@conf_vars({("core", "load_examples"): "true"})
def test_cli_list_dags(self, stdout_capture):
args = self.parser.parse_args(["dags", "list", "--output", "json"])
with stdout_capture as temp_stdout:
dag_command.dag_list_dags(args)
out = temp_stdout.getvalue()
dag_list = json.loads(out)
for key in ["dag_id", "fileloc", "owners", "is_paused"]: # "bundle_name", "bundle_version"?
assert key in dag_list[0]
assert any("airflow/example_dags/example_complex.py" in d["fileloc"] for d in dag_list)
@conf_vars({("core", "load_examples"): "true"})
def test_cli_list_local_dags(self, stdout_capture):
# Clear the database
clear_db_dags()
args = self.parser.parse_args(["dags", "list", "--output", "json", "--local"])
with stdout_capture as temp_stdout:
dag_command.dag_list_dags(args)
out = temp_stdout.getvalue()
dag_list = json.loads(out)
for key in ["dag_id", "fileloc", "owners", "is_paused"]:
assert key in dag_list[0]
assert any("airflow/example_dags/example_complex.py" in d["fileloc"] for d in dag_list)
# Rebuild Test DB for other tests
parse_and_sync_to_db(os.devnull, include_examples=True)
@conf_vars({("core", "load_examples"): "false"})
def test_cli_list_local_dags_with_bundle_name(self, configure_testing_dag_bundle, stdout_capture):
# Clear the database
clear_db_dags()
path_to_parse = TEST_DAGS_FOLDER / "test_example_bash_operator.py"
args = self.parser.parse_args(
["dags", "list", "--output", "json", "--local", "--bundle-name", "testing"]
)
with configure_testing_dag_bundle(path_to_parse):
with stdout_capture as temp_stdout:
dag_command.dag_list_dags(args)
out = temp_stdout.getvalue()
dag_list = json.loads(out)
for key in ["dag_id", "fileloc", "owners", "is_paused"]:
assert key in dag_list[0]
assert any(
str(TEST_DAGS_FOLDER / "test_example_bash_operator.py") in d["fileloc"] for d in dag_list
)
# Rebuild Test DB for other tests
parse_and_sync_to_db(os.devnull, include_examples=True)
@conf_vars({("core", "load_examples"): "true"})
def test_cli_list_dags_custom_cols(self, stdout_capture):
args = self.parser.parse_args(
["dags", "list", "--output", "json", "--columns", "dag_id,last_parsed_time"]
)
with stdout_capture as temp_stdout:
dag_command.dag_list_dags(args)
out = temp_stdout.getvalue()
dag_list = json.loads(out)
for key in ["dag_id", "last_parsed_time"]:
assert key in dag_list[0]
for key in ["fileloc", "owners", "is_paused"]:
assert key not in dag_list[0]
@conf_vars({("core", "load_examples"): "true"})
def test_cli_list_dags_invalid_cols(self, stderr_capture):
args = self.parser.parse_args(["dags", "list", "--output", "json", "--columns", "dag_id,invalid_col"])
with stderr_capture as temp_stderr:
dag_command.dag_list_dags(args)
out = temp_stderr.getvalue()
assert "Ignoring the following invalid columns: ['invalid_col']" in out
@conf_vars({("core", "load_examples"): "false"})
def test_cli_list_dags_prints_import_errors(
self, configure_testing_dag_bundle, get_test_dag, stderr_capture
):
path_to_parse = TEST_DAGS_FOLDER / "test_invalid_cron.py"
get_test_dag("test_invalid_cron")
args = self.parser.parse_args(["dags", "list", "--output", "yaml", "--bundle-name", "testing"])
with configure_testing_dag_bundle(path_to_parse):
with stderr_capture as temp_stderr:
dag_command.dag_list_dags(args)
out = temp_stderr.getvalue()
assert "Failed to load all files." in out
@conf_vars({("core", "load_examples"): "false"})
def test_cli_list_dags_prints_local_import_errors(
self, configure_testing_dag_bundle, get_test_dag, stderr_capture
):
# Clear the database
clear_db_dags()
path_to_parse = TEST_DAGS_FOLDER / "test_invalid_cron.py"
get_test_dag("test_invalid_cron")
args = self.parser.parse_args(
["dags", "list", "--output", "yaml", "--bundle-name", "testing", "--local"]
)
with configure_testing_dag_bundle(path_to_parse):
with stderr_capture as temp_stderr:
dag_command.dag_list_dags(args)
out = temp_stderr.getvalue()
assert "Failed to load all files." in out
# Rebuild Test DB for other tests
parse_and_sync_to_db(os.devnull, include_examples=True)
@conf_vars({("core", "load_examples"): "true"})
@mock.patch("airflow.models.DagModel.get_dagmodel")
def test_list_dags_none_get_dagmodel(self, mock_get_dagmodel, stdout_capture):
mock_get_dagmodel.return_value = None
args = self.parser.parse_args(["dags", "list", "--output", "json"])
with stdout_capture as temp_stdout:
dag_command.dag_list_dags(args)
out = temp_stdout.getvalue()
dag_list = json.loads(out)
for key in ["dag_id", "fileloc", "owners", "is_paused"]:
assert key in dag_list[0]
assert any("airflow/example_dags/example_complex.py" in d["fileloc"] for d in dag_list)
@conf_vars({("core", "load_examples"): "true"})
def test_dagbag_dag_col(self, session):
dagbag = DBDagBag()
dag_details = dag_command._get_dagbag_dag_details(
dagbag.get_latest_version_of_dag("tutorial_dag", session=session),
session=session,
)
assert sorted(dag_details) == sorted(dag_command.DAG_DETAIL_FIELDS)
@conf_vars({("core", "load_examples"): "false"})
def test_cli_list_import_errors(self, get_test_dag, configure_testing_dag_bundle, caplog):
path_to_parse = TEST_DAGS_FOLDER / "test_invalid_cron.py"
get_test_dag("test_invalid_cron")
args = self.parser.parse_args(
["dags", "list-import-errors", "--output", "yaml", "--bundle-name", "testing"]
)
with configure_testing_dag_bundle(path_to_parse):
with pytest.raises(SystemExit) as err_ctx:
with caplog.at_level(logging.ERROR):
dag_command.dag_list_import_errors(args)
log_output = caplog.text
assert err_ctx.value.code == 1
assert str(path_to_parse) in log_output
assert "[0 100 * * *] is not acceptable, out of range" in log_output
def test_cli_list_dag_runs(self):
dag_command.dag_trigger(
self.parser.parse_args(
[
"dags",
"trigger",
"example_bash_operator",
]
)
)
args = self.parser.parse_args(
[
"dags",
"list-runs",
"example_bash_operator",
"--no-backfill",
"--start-date",
DEFAULT_DATE.isoformat(),
"--end-date",
timezone.make_aware(datetime.max).isoformat(),
]
)
dag_command.dag_list_dag_runs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(
[
"dags",
"list-jobs",
"--dag-id",
"example_bash_operator",
"--state",
"success",
"--limit",
"100",
"--output",
"json",
]
)
dag_command.dag_list_jobs(args)
def test_pause(self):
args = self.parser.parse_args(["dags", "pause", "example_bash_operator"])
dag_command.dag_pause(args)
assert DagModel.get_dagmodel("example_bash_operator").is_paused
dag_command.dag_unpause(args)
assert not DagModel.get_dagmodel("example_bash_operator").is_paused
@mock.patch("airflow.cli.commands.dag_command.ask_yesno")
def test_pause_regex(self, mock_yesno):
args = self.parser.parse_args(["dags", "pause", "^example_.*$", "--treat-dag-id-as-regex"])
dag_command.dag_pause(args)
mock_yesno.assert_called_once()
assert DagModel.get_dagmodel("example_bash_decorator").is_paused
assert DagModel.get_dagmodel("example_kubernetes_executor").is_paused
assert DagModel.get_dagmodel("example_xcom_args").is_paused
args = self.parser.parse_args(["dags", "unpause", "^example_.*$", "--treat-dag-id-as-regex"])
dag_command.dag_unpause(args)
assert not DagModel.get_dagmodel("example_bash_decorator").is_paused
assert not DagModel.get_dagmodel("example_kubernetes_executor").is_paused
assert not DagModel.get_dagmodel("example_xcom_args").is_paused
@mock.patch("airflow.cli.commands.dag_command.ask_yesno")
def test_pause_regex_operation_cancelled(self, ask_yesno, capsys):
args = self.parser.parse_args(["dags", "pause", "example_bash_operator", "--treat-dag-id-as-regex"])
ask_yesno.return_value = False
dag_command.dag_pause(args)
stdout = capsys.readouterr().out
assert "Operation cancelled by user" in stdout
@mock.patch("airflow.cli.commands.dag_command.ask_yesno")
def test_pause_regex_yes(self, mock_yesno):
args = self.parser.parse_args(["dags", "pause", ".*", "--treat-dag-id-as-regex", "--yes"])
dag_command.dag_pause(args)
mock_yesno.assert_not_called()
dag_command.dag_unpause(args)
def test_pause_non_existing_dag_do_not_error(self, stdout_capture):
args = self.parser.parse_args(["dags", "pause", "non_existing_dag"])
with stdout_capture as temp_stdout:
dag_command.dag_pause(args)
out = temp_stdout.splitlines()[-1]
assert out == "No unpaused DAGs were found"
def test_unpause_non_existing_dag_do_not_error(self, stdout_capture):
args = self.parser.parse_args(["dags", "unpause", "non_existing_dag"])
with stdout_capture as temp_stdout:
dag_command.dag_unpause(args)
out = temp_stdout.splitlines()[-1]
assert out == "No paused DAGs were found"
def test_unpause_already_unpaused_dag_do_not_error(self, stdout_capture):
args = self.parser.parse_args(["dags", "unpause", "example_bash_operator", "--yes"])
with stdout_capture as temp_stdout:
dag_command.dag_unpause(args)
out = temp_stdout.splitlines()[-1]
assert out == "No paused DAGs were found"
def test_pausing_already_paused_dag_do_not_error(self, stdout_capture):
args = self.parser.parse_args(["dags", "pause", "example_bash_operator", "--yes"])
with stdout_capture as temp_stdout:
dag_command.dag_pause(args)
dag_command.dag_pause(args)
out = temp_stdout.splitlines()[-1]
assert out == "No unpaused DAGs were found"
def test_trigger_dag(self):
dag_command.dag_trigger(
self.parser.parse_args(
[
"dags",
"trigger",
"example_bash_operator",
"--run-id=test_trigger_dag",
'--conf={"foo": "bar"}',
],
),
)
with create_session() as session:
dagrun = session.query(DagRun).filter(DagRun.run_id == "test_trigger_dag").one()
assert dagrun, "DagRun not created"
assert dagrun.run_type == DagRunType.MANUAL
assert dagrun.conf == {"foo": "bar"}
# logical_date is None as it's not provided
assert dagrun.logical_date is None
# data_interval is None as logical_date is None
assert dagrun.data_interval_start is None
assert dagrun.data_interval_end is None
def test_trigger_dag_with_microseconds(self):
dag_command.dag_trigger(
self.parser.parse_args(
[
"dags",
"trigger",
"example_bash_operator",
"--run-id=test_trigger_dag_with_micro",
"--logical-date=2021-06-04T09:00:00.000001+08:00",
"--no-replace-microseconds",
],
)
)
with create_session() as session:
dagrun = session.query(DagRun).filter(DagRun.run_id == "test_trigger_dag_with_micro").one()
assert dagrun, "DagRun not created"
assert dagrun.run_type == DagRunType.MANUAL
assert dagrun.logical_date.isoformat(timespec="microseconds") == "2021-06-04T01:00:00.000001+00:00"
def test_trigger_dag_invalid_conf(self):
with pytest.raises(ValueError, match=r"Expecting value: line \d+ column \d+ \(char \d+\)"):
dag_command.dag_trigger(
self.parser.parse_args(
[
"dags",
"trigger",
"example_bash_operator",
"--run-id",
"trigger_dag_xxx",
"--conf",
"NOT JSON",
]
),
)
def test_trigger_dag_output_as_json(self, stdout_capture):
args = self.parser.parse_args(
[
"dags",
"trigger",
"example_bash_operator",
"--run-id",
"trigger_dag_xxx",
"--conf",
'{"conf1": "val1", "conf2": "val2"}',
"--output=json",
]
)
with stdout_capture as temp_stdout:
dag_command.dag_trigger(args)
# get the last line from the logs ignoring all logging lines
out = temp_stdout.getvalue().strip().splitlines()[-1]
parsed_out = json.loads(out)
assert len(parsed_out) == 1
assert parsed_out[0]["dag_id"] == "example_bash_operator"
assert parsed_out[0]["dag_run_id"] == "trigger_dag_xxx"
assert parsed_out[0]["conf"] == {"conf1": "val1", "conf2": "val2"}
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key, bundle_name="dags-folder"))
session.commit()
dag_command.dag_delete(self.parser.parse_args(["dags", "delete", key, "--yes"]))
assert session.query(DM).filter_by(dag_id=key).count() == 0
with pytest.raises(AirflowException):
dag_command.dag_delete(
self.parser.parse_args(["dags", "delete", "does_not_exist_dag", "--yes"]),
)
def test_dag_delete_when_backfill_and_dagrun_exist(self):
# Test to check that the DAG should be deleted even if
# there are backfill records associated with it.
from airflow.models.backfill import Backfill
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key, bundle_name="dags-folder"))
_backfill = Backfill(dag_id=key, from_date=DEFAULT_DATE, to_date=DEFAULT_DATE + timedelta(days=1))
session.add(_backfill)
# To create the backfill_id in DagRun
session.flush()
session.add(
DagRun(
dag_id=key,
run_id="backfill__" + key,
state=DagRunState.SUCCESS,
run_type="backfill",
backfill_id=_backfill.id,
)
)
session.commit()
dag_command.dag_delete(self.parser.parse_args(["dags", "delete", key, "--yes"]))
assert session.query(DM).filter_by(dag_id=key).count() == 0
with pytest.raises(AirflowException):
dag_command.dag_delete(
self.parser.parse_args(["dags", "delete", "does_not_exist_dag", "--yes"]),
)
def test_delete_dag_existing_file(self, tmp_path):
# Test to check that the DAG should be deleted even if
# the file containing it is not deleted
path = tmp_path / "testfile"
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key, bundle_name="dags-folder", fileloc=os.fspath(path)))
session.commit()
dag_command.dag_delete(self.parser.parse_args(["dags", "delete", key, "--yes"]))
assert session.query(DM).filter_by(dag_id=key).count() == 0
def test_cli_list_jobs(self):
args = self.parser.parse_args(["dags", "list-jobs"])
dag_command.dag_list_jobs(args)
def test_dag_state(self):
assert (
dag_command.dag_state(
self.parser.parse_args(["dags", "state", "example_bash_operator", DEFAULT_DATE.isoformat()])
)
is None
)
@mock.patch("airflow.cli.commands.dag_command.get_bagged_dag")
def test_dag_test(self, mock_get_dag):
cli_args = self.parser.parse_args(["dags", "test", "example_bash_operator", DEFAULT_DATE.isoformat()])
dag_command.dag_test(cli_args)
mock_get_dag.assert_has_calls(
[
mock.call(bundle_names=None, dag_id="example_bash_operator", dagfile_path=None),
mock.call().__bool__(),
mock.call().test(
logical_date=timezone.parse(DEFAULT_DATE.isoformat()),
run_conf=None,
use_executor=False,
mark_success_pattern=None,
),
]
)
@mock.patch("airflow.cli.commands.dag_command.get_bagged_dag")
def test_dag_test_fail_raise_error(self, mock_get_dag):
logical_date_str = DEFAULT_DATE.isoformat()
mock_get_dag.return_value.test.return_value = DagRun(
dag_id="example_bash_operator", logical_date=DEFAULT_DATE, state=DagRunState.FAILED
)
cli_args = self.parser.parse_args(["dags", "test", "example_bash_operator", logical_date_str])
with pytest.raises(SystemExit, match=r"DagRun failed"):
dag_command.dag_test(cli_args)
@mock.patch("airflow.cli.commands.dag_command.get_bagged_dag")
def test_dag_test_no_logical_date(self, mock_get_dag, time_machine):
now = pendulum.now()
time_machine.move_to(now, tick=False)
cli_args = self.parser.parse_args(["dags", "test", "example_bash_operator"])
assert cli_args.logical_date is None
dag_command.dag_test(cli_args)
mock_get_dag.assert_has_calls(
[
mock.call(bundle_names=None, dag_id="example_bash_operator", dagfile_path=None),
mock.call().__bool__(),
mock.call().test(
logical_date=mock.ANY,
run_conf=None,
use_executor=False,
mark_success_pattern=None,
),
]
)
@mock.patch("airflow.cli.commands.dag_command.get_bagged_dag")
def test_dag_test_conf(self, mock_get_dag):
cli_args = self.parser.parse_args(
[
"dags",
"test",
"example_bash_operator",
DEFAULT_DATE.isoformat(),
"-c",
'{"dag_run_conf_param": "param_value"}',
]
)
dag_command.dag_test(cli_args)
mock_get_dag.assert_has_calls(
[
mock.call(bundle_names=None, dag_id="example_bash_operator", dagfile_path=None),
mock.call().__bool__(),
mock.call().test(
logical_date=timezone.parse(DEFAULT_DATE.isoformat()),
run_conf={"dag_run_conf_param": "param_value"},
use_executor=False,
mark_success_pattern=None,
),
]
)
@mock.patch("airflow.cli.commands.dag_command.render_dag", return_value=MagicMock(source="SOURCE"))
@mock.patch("airflow.cli.commands.dag_command.get_bagged_dag")
def test_dag_test_show_dag(self, mock_get_dag, mock_render_dag, stdout_capture):
mock_get_dag.return_value.test.return_value.run_id = "__test_dag_test_show_dag_fake_dag_run_run_id__"
cli_args = self.parser.parse_args(
["dags", "test", "example_bash_operator", DEFAULT_DATE.isoformat(), "--show-dagrun"]
)
with stdout_capture as stdout:
dag_command.dag_test(cli_args)
output = stdout.getvalue()
mock_get_dag.assert_has_calls(
[
mock.call(bundle_names=None, dag_id="example_bash_operator", dagfile_path=None),
mock.call().__bool__(),
mock.call().test(
logical_date=timezone.parse(DEFAULT_DATE.isoformat()),
run_conf=None,
use_executor=False,
mark_success_pattern=None,
),
]
)
mock_render_dag.assert_has_calls([mock.call(mock_get_dag.return_value, tis=[])])
assert "SOURCE" in output
@mock.patch("airflow.dag_processing.dagbag.DagBag")
def test_dag_test_with_bundle_name(self, mock_dagbag, configure_dag_bundles):
"""Test that DAG can be tested using bundle name."""
mock_dagbag.return_value.get_dag.return_value.test.return_value = DagRun(
dag_id="test_example_bash_operator", logical_date=DEFAULT_DATE, state=DagRunState.SUCCESS
)
cli_args = self.parser.parse_args(
[
"dags",
"test",
"test_example_bash_operator",
DEFAULT_DATE.isoformat(),
"--bundle-name",
"testing",
]
)
with configure_dag_bundles({"testing": TEST_DAGS_FOLDER}):
dag_command.dag_test(cli_args)
mock_dagbag.assert_called_once_with(
bundle_path=TEST_DAGS_FOLDER,
dag_folder=TEST_DAGS_FOLDER,
bundle_name="testing",
include_examples=False,
)
@mock.patch("airflow.dag_processing.dagbag.DagBag")
def test_dag_test_with_dagfile_path(self, mock_dagbag, configure_dag_bundles):
"""Test that DAG can be tested using dagfile path."""
mock_dagbag.return_value.get_dag.return_value.test.return_value = DagRun(
dag_id="test_example_bash_operator", logical_date=DEFAULT_DATE, state=DagRunState.SUCCESS
)
dag_file = TEST_DAGS_FOLDER / "test_example_bash_operator.py"
cli_args = self.parser.parse_args(
["dags", "test", "test_example_bash_operator", "--dagfile-path", str(dag_file)]
)
with configure_dag_bundles({"testing": TEST_DAGS_FOLDER}):
dag_command.dag_test(cli_args)
mock_dagbag.assert_called_once_with(
bundle_path=TEST_DAGS_FOLDER,
dag_folder=str(dag_file),
bundle_name="testing",
include_examples=False,
)
@mock.patch("airflow.dag_processing.dagbag.DagBag")
def test_dag_test_with_both_bundle_and_dagfile_path(self, mock_dagbag, configure_dag_bundles):
"""Test that DAG can be tested using both bundle name and dagfile path."""
mock_dagbag.return_value.get_dag.return_value.test.return_value = DagRun(
dag_id="test_example_bash_operator", logical_date=DEFAULT_DATE, state=DagRunState.SUCCESS
)
dag_file = TEST_DAGS_FOLDER / "test_example_bash_operator.py"
cli_args = self.parser.parse_args(
[
"dags",
"test",
"test_example_bash_operator",
DEFAULT_DATE.isoformat(),
"--bundle-name",
"testing",
"--dagfile-path",
str(dag_file),
]
)
with configure_dag_bundles({"testing": TEST_DAGS_FOLDER}):
dag_command.dag_test(cli_args)
mock_dagbag.assert_called_once_with(
bundle_path=TEST_DAGS_FOLDER,
dag_folder=str(dag_file),
bundle_name="testing",
include_examples=False,
)
@mock.patch("airflow.models.dagrun.get_or_create_dagrun")
def test_dag_test_with_custom_timetable(self, mock_get_or_create_dagrun):
"""
when calling `dags test` on dag with custom timetable, the DagRun object should be created with
data_intervals.
"""
cli_args = self.parser.parse_args(
["dags", "test", "example_workday_timetable", DEFAULT_DATE.isoformat()]
)
from airflow.example_dags.plugins.workday import AfterWorkdayTimetable
with mock.patch.object(AfterWorkdayTimetable, "get_next_workday", return_value=DEFAULT_DATE):
dag_command.dag_test(cli_args)
assert "data_interval" in mock_get_or_create_dagrun.call_args.kwargs
@mock.patch("airflow.models.dagrun.get_or_create_dagrun")
def test_dag_with_parsing_context(
self, mock_get_or_create_dagrun, testing_dag_bundle, configure_testing_dag_bundle
):
"""
airflow parsing context should be set when calling `dags test`.
"""
path_to_parse = TEST_DAGS_FOLDER / "test_dag_parsing_context.py"
with configure_testing_dag_bundle(path_to_parse):
bag = DagBag(dag_folder=path_to_parse, include_examples=False)
sync_bag_to_db(bag, "testing", None)
cli_args = self.parser.parse_args(
["dags", "test", "test_dag_parsing_context", DEFAULT_DATE.isoformat()]
)
dag_command.dag_test(cli_args)
# if dag_parsing_context is not set, this DAG will only have 1 task
assert len(mock_get_or_create_dagrun.call_args[1]["dag"].task_ids) == 2
def test_dag_test_run_inline_trigger(self, dag_maker):
now = timezone.utcnow()
trigger = DateTimeTrigger(moment=now)
task_sdk_ti = MagicMock()
task_sdk_ti.id = 1234
e = _run_inline_trigger(trigger, task_sdk_ti)
assert isinstance(e, TriggerEvent)
assert e.payload == now
def test_dag_test_no_triggerer_running(self, dag_maker):
with mock.patch(
"airflow.sdk.definitions.dag._run_inline_trigger", wraps=_run_inline_trigger
) as mock_run:
with dag_maker() as dag:
@task
def one():
return 1
@task
def two(val):
return val + 1
trigger = TimeDeltaTrigger(timedelta(seconds=0))
class MyOp(BaseOperator):
template_fields = ("tfield",)
def __init__(self, tfield, **kwargs):
self.tfield = tfield
super().__init__(**kwargs)
def execute(self, context, event=None):
if event is None:
print("I AM DEFERRING")
self.defer(trigger=trigger, method_name="execute")
return
print("RESUMING")
assert self.tfield + 1 == 3
task_one = one()
task_two = two(task_one)
op = MyOp(task_id="abc", tfield=task_two)
task_two >> op
sync_dag_to_db(dag)
dr = dag.test()
trigger_arg = mock_run.call_args_list[0].args[0]
assert isinstance(trigger_arg, DateTimeTrigger)
assert trigger_arg.moment == trigger.moment
tis = dr.get_task_instances()
assert next(x for x in tis if x.task_id == "abc").state == "success"
@mock.patch("airflow.sdk.execution_time.task_runner._execute_task")
def test_dag_test_with_mark_success(self, mock__execute_task):
"""
option `--mark-success-pattern` should mark matching tasks as success without executing them.
"""
cli_args = self.parser.parse_args(
[
"dags",
"test",
"example_sensor_decorator",
datetime(2024, 1, 1, 0, 0, 0).isoformat(),
"--mark-success-pattern",
"wait_for_upstream",
]
)
dag_command.dag_test(cli_args)
# only second operator was actually executed, first one was marked as success
assert len(mock__execute_task.call_args_list) == 1
assert mock__execute_task.call_args_list[0].kwargs["ti"].task_id == "dummy_operator"
@conf_vars({("core", "load_examples"): "false"})
def test_get_dag_excludes_examples_with_bundle(self, configure_testing_dag_bundle):
"""Test that example DAGs are excluded when bundle names are passed."""
try:
from airflow.utils.cli import get_bagged_dag
except ImportError: # Prior to Airflow 3.1.0.
from airflow.utils.cli import get_dag as get_bagged_dag # type: ignore
with configure_testing_dag_bundle(TEST_DAGS_FOLDER / "test_sensor.py"):
# example DAG should not be found since include_examples=False
with pytest.raises(AirflowException, match="could not be found"):
get_bagged_dag(bundle_names=["testing"], dag_id="example_simplest_dag")
# However, "test_sensor.py" should exist
dag = get_bagged_dag(bundle_names=["testing"], dag_id="test_sensor")
assert dag.dag_id == "test_sensor"
| TestCliDags |
python | matplotlib__matplotlib | galleries/examples/user_interfaces/fourier_demo_wx_sgskip.py | {
"start": 3176,
"end": 8292
} | class ____(wx.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
panel = wx.Panel(self)
# create the GUI elements
self.createCanvas(panel)
self.createSliders(panel)
# place them in a sizer for the Layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas, 1, wx.EXPAND)
sizer.Add(self.frequencySliderGroup.sizer, 0,
wx.EXPAND | wx.ALL, border=5)
sizer.Add(self.amplitudeSliderGroup.sizer, 0,
wx.EXPAND | wx.ALL, border=5)
panel.SetSizer(sizer)
def createCanvas(self, parent):
self.lines = []
self.figure = Figure()
self.canvas = FigureCanvas(parent, -1, self.figure)
self.canvas.callbacks.connect('button_press_event', self.mouseDown)
self.canvas.callbacks.connect('motion_notify_event', self.mouseMotion)
self.canvas.callbacks.connect('button_release_event', self.mouseUp)
self.state = ''
self.mouseInfo = (None, None, None, None)
self.f0 = Param(2., minimum=0., maximum=6.)
self.A = Param(1., minimum=0.01, maximum=2.)
self.createPlots()
# Not sure I like having two params attached to the same Knob,
# but that is what we have here... it works but feels kludgy -
# although maybe it's not too bad since the knob changes both params
# at the same time (both f0 and A are affected during a drag)
self.f0.attach(self)
self.A.attach(self)
def createSliders(self, panel):
self.frequencySliderGroup = SliderGroup(
panel,
label='Frequency f0:',
param=self.f0)
self.amplitudeSliderGroup = SliderGroup(panel, label=' Amplitude a:',
param=self.A)
def mouseDown(self, event):
if self.lines[0].contains(event)[0]:
self.state = 'frequency'
elif self.lines[1].contains(event)[0]:
self.state = 'time'
else:
self.state = ''
self.mouseInfo = (event.xdata, event.ydata,
max(self.f0.value, .1),
self.A.value)
def mouseMotion(self, event):
if self.state == '':
return
x, y = event.xdata, event.ydata
if x is None: # outside the Axes
return
x0, y0, f0Init, AInit = self.mouseInfo
self.A.set(AInit + (AInit * (y - y0) / y0), self)
if self.state == 'frequency':
self.f0.set(f0Init + (f0Init * (x - x0) / x0))
elif self.state == 'time':
if (x - x0) / x0 != -1.:
self.f0.set(1. / (1. / f0Init + (1. / f0Init * (x - x0) / x0)))
def mouseUp(self, event):
self.state = ''
def createPlots(self):
# This method creates the subplots, waveforms and labels.
# Later, when the waveforms or sliders are dragged, only the
# waveform data will be updated (not here, but below in setKnob).
self.subplot1, self.subplot2 = self.figure.subplots(2)
x1, y1, x2, y2 = self.compute(self.f0.value, self.A.value)
color = (1., 0., 0.)
self.lines += self.subplot1.plot(x1, y1, color=color, linewidth=2)
self.lines += self.subplot2.plot(x2, y2, color=color, linewidth=2)
# Set some plot attributes
self.subplot1.set_title(
"Click and drag waveforms to change frequency and amplitude",
fontsize=12)
self.subplot1.set_ylabel("Frequency Domain Waveform X(f)", fontsize=8)
self.subplot1.set_xlabel("frequency f", fontsize=8)
self.subplot2.set_ylabel("Time Domain Waveform x(t)", fontsize=8)
self.subplot2.set_xlabel("time t", fontsize=8)
self.subplot1.set_xlim(-6, 6)
self.subplot1.set_ylim(0, 1)
self.subplot2.set_xlim(-2, 2)
self.subplot2.set_ylim(-2, 2)
self.subplot1.text(0.05, .95,
r'$X(f) = \mathcal{F}\{x(t)\}$',
verticalalignment='top',
transform=self.subplot1.transAxes)
self.subplot2.text(0.05, .95,
r'$x(t) = a \cdot \cos(2\pi f_0 t) e^{-\pi t^2}$',
verticalalignment='top',
transform=self.subplot2.transAxes)
def compute(self, f0, A):
f = np.arange(-6., 6., 0.02)
t = np.arange(-2., 2., 0.01)
x = A * np.cos(2 * np.pi * f0 * t) * np.exp(-np.pi * t ** 2)
X = A / 2 * \
(np.exp(-np.pi * (f - f0) ** 2) + np.exp(-np.pi * (f + f0) ** 2))
return f, X, t, x
def setKnob(self, value):
# Note, we ignore value arg here and just go by state of the params
x1, y1, x2, y2 = self.compute(self.f0.value, self.A.value)
# update the data of the two waveforms
self.lines[0].set(xdata=x1, ydata=y1)
self.lines[1].set(xdata=x2, ydata=y2)
# make the canvas draw its contents again with the new data
self.canvas.draw()
| FourierDemoFrame |
python | celery__celery | celery/exceptions.py | {
"start": 4346,
"end": 4428
} | class ____(CeleryWarning):
"""Potential security issue found."""
| SecurityWarning |
python | joke2k__faker | faker/providers/person/ha_NG/__init__.py | {
"start": 232,
"end": 1752
} | class ____(PersonProvider):
# Male first names
first_names_male = [
"Abdullahi",
"Musa",
"Sani",
"Ibrahim",
"Aliyu",
"Bello",
"Kabiru",
"Shehu",
"Yusuf",
"Haruna",
"Ismail",
"Usman",
"Nasiru",
"Mahmud",
"Umar",
"Habibu",
"Danjuma",
"Tanimu",
"Shamsuddeen",
"Ahmad",
]
# Female first names
first_names_female = [
"Zainab",
"Aisha",
"Hauwa",
"Fatima",
"Hadiza",
"Maryam",
"Sa’adatu",
"Jamila",
"Rabi",
"Khadija",
"Bilkisu",
"Asma’u",
"Halima",
"Safiya",
"Sumayya",
"Habiba",
"Ruqayya",
"Hafsat",
"Aminatu",
"Gambo",
]
# Combined list
first_names = first_names_male + first_names_female
# Prefixes
prefixes_male = ["Alhaji", "Mallam", "Dr.", "Prof."]
prefixes_female = ["Hajiya", "Mrs.", "Dr.", "Prof."]
prefixes = prefixes_male + prefixes_female
# Last names
last_names = [
"Abubakar",
"Mohammed",
"Yahaya",
"Garba",
"Danjuma",
"Buhari",
"Zubairu",
"Jibril",
"Suleiman",
"Lawal",
"Tukur",
"Ali",
"Shehu",
"Mustapha",
"Kabir",
"Idris",
"Sa’idu",
"Bappa",
"Yusuf",
"Isah",
]
| Provider |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 11009,
"end": 11238
} | class ____(list[PotentialTypeVar]):
def __new__(cls: type[Generic5]) -> Generic5: ...
def __enter__(self: Generic5) -> Generic5: ...
# Test cases based on issue #20781 - metaclasses that triggers IsMetaclass::Maybe
| Generic5 |
python | huggingface__transformers | src/transformers/models/got_ocr2/modeling_got_ocr2.py | {
"start": 20891,
"end": 22477
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for GotOcr2 outputs, with hidden states and attentions.
"""
)
| GotOcr2CausalLMOutputWithPast |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 151250,
"end": 153085
} | class ____(fixtures.TestBase):
@testing.combinations(
(Range(2, 7), INT4RANGE),
(Range(-10, 7), INT4RANGE),
(Range(None, -7), INT4RANGE),
(Range(33, None), INT4RANGE),
(Range(-2147483648, 2147483647), INT4RANGE),
(Range(-2147483648 - 1, 2147483647), INT8RANGE),
(Range(-2147483648, 2147483647 + 1), INT8RANGE),
(Range(-2147483648 - 1, None), INT8RANGE),
(Range(None, 2147483647 + 1), INT8RANGE),
)
def test_resolve_for_literal(self, obj, type_):
"""This tests that the int4 / int8 version is selected correctly by
_resolve_for_literal."""
lit = literal(obj)
eq_(type(lit.type), type_)
@testing.combinations(
(Range(2, 7), INT4MULTIRANGE),
(Range(-10, 7), INT4MULTIRANGE),
(Range(None, -7), INT4MULTIRANGE),
(Range(33, None), INT4MULTIRANGE),
(Range(-2147483648, 2147483647), INT4MULTIRANGE),
(Range(-2147483648 - 1, 2147483647), INT8MULTIRANGE),
(Range(-2147483648, 2147483647 + 1), INT8MULTIRANGE),
(Range(-2147483648 - 1, None), INT8MULTIRANGE),
(Range(None, 2147483647 + 1), INT8MULTIRANGE),
)
def test_resolve_for_literal_multi(self, obj, type_):
"""This tests that the int4 / int8 version is selected correctly by
_resolve_for_literal."""
list_ = MultiRange([Range(-1, 1), obj, Range(7, 100)])
lit = literal(list_)
eq_(type(lit.type), type_)
def test_multirange_sequence(self):
plain = [Range(-1, 1), Range(42, 43), Range(7, 100)]
mr = MultiRange(plain)
is_true(issubclass(MultiRange, list))
is_true(isinstance(mr, list))
eq_(mr, plain)
eq_(str(mr), str(plain))
eq_(repr(mr), repr(plain))
ne_(mr, plain[1:])
| RangeMiscTests |
python | django__django | tests/m2m_through/models.py | {
"start": 4045,
"end": 4183
} | class ____(models.Model):
iname = models.CharField(max_length=20, unique=True)
class Meta:
ordering = ("iname",)
| Ingredient |
python | python-excel__xlrd | xlrd/sheet.py | {
"start": 99063,
"end": 101577
} | class ____(BaseObject):
"""
Contains the data for one cell.
.. warning::
You don't call this class yourself. You access :class:`Cell` objects
via methods of the :class:`Sheet` object(s) that you found in the
:class:`~xlrd.book.Book` object that was returned when you called
:func:`~xlrd.open_workbook`
Cell objects have three attributes: ``ctype`` is an int, ``value``
(which depends on ``ctype``) and ``xf_index``.
If ``formatting_info`` is not enabled when the workbook is opened,
``xf_index`` will be ``None``.
The following table describes the types of cells and how their values
are represented in Python.
.. raw:: html
<table border="1" cellpadding="7">
<tr>
<th>Type symbol</th>
<th>Type number</th>
<th>Python value</th>
</tr>
<tr>
<td>XL_CELL_EMPTY</td>
<td align="center">0</td>
<td>empty string ''</td>
</tr>
<tr>
<td>XL_CELL_TEXT</td>
<td align="center">1</td>
<td>a Unicode string</td>
</tr>
<tr>
<td>XL_CELL_NUMBER</td>
<td align="center">2</td>
<td>float</td>
</tr>
<tr>
<td>XL_CELL_DATE</td>
<td align="center">3</td>
<td>float</td>
</tr>
<tr>
<td>XL_CELL_BOOLEAN</td>
<td align="center">4</td>
<td>int; 1 means TRUE, 0 means FALSE</td>
</tr>
<tr>
<td>XL_CELL_ERROR</td>
<td align="center">5</td>
<td>int representing internal Excel codes; for a text representation,
refer to the supplied dictionary error_text_from_code</td>
</tr>
<tr>
<td>XL_CELL_BLANK</td>
<td align="center">6</td>
<td>empty string ''. Note: this type will appear only when
open_workbook(..., formatting_info=True) is used.</td>
</tr>
</table>
"""
__slots__ = ['ctype', 'value', 'xf_index']
def __init__(self, ctype, value, xf_index=None):
self.ctype = ctype
self.value = value
self.xf_index = xf_index
def __repr__(self):
if self.xf_index is None:
return "%s:%r" % (ctype_text[self.ctype], self.value)
else:
return "%s:%r (XF:%r)" % (ctype_text[self.ctype], self.value, self.xf_index)
empty_cell = Cell(XL_CELL_EMPTY, UNICODE_LITERAL(''))
##### =============== Colinfo and Rowinfo ============================== #####
| Cell |
python | walkccc__LeetCode | solutions/1621. Number of Sets of K Non-Overlapping Line Segments/1621.py | {
"start": 0,
"end": 671
} | class ____:
def numberOfSets(self, n: int, k: int) -> int:
MOD = 1_000_000_007
@functools.lru_cache(None)
def dp(i: int, k: int, drawing: bool) -> int:
if k == 0: # Find a way to draw k segments.
return 1
if i == n: # Reach the end.
return 0
if drawing:
# 1. Keep drawing at i and move to i + 1.
# 2. Stop at i so decrease k. We can start from i for the next segment.
return (dp(i + 1, k, True) + dp(i, k - 1, False)) % MOD
# 1. Skip i and move to i + 1.
# 2. Start at i and move to i + 1.
return (dp(i + 1, k, False) + dp(i + 1, k, True)) % MOD
return dp(0, k, False)
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_volume_attachment_status.py | {
"start": 383,
"end": 7156
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'attach_error': 'V1VolumeError',
'attached': 'bool',
'attachment_metadata': 'dict(str, str)',
'detach_error': 'V1VolumeError'
}
attribute_map = {
'attach_error': 'attachError',
'attached': 'attached',
'attachment_metadata': 'attachmentMetadata',
'detach_error': 'detachError'
}
def __init__(self, attach_error=None, attached=None, attachment_metadata=None, detach_error=None, local_vars_configuration=None): # noqa: E501
"""V1VolumeAttachmentStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._attach_error = None
self._attached = None
self._attachment_metadata = None
self._detach_error = None
self.discriminator = None
if attach_error is not None:
self.attach_error = attach_error
self.attached = attached
if attachment_metadata is not None:
self.attachment_metadata = attachment_metadata
if detach_error is not None:
self.detach_error = detach_error
@property
def attach_error(self):
"""Gets the attach_error of this V1VolumeAttachmentStatus. # noqa: E501
:return: The attach_error of this V1VolumeAttachmentStatus. # noqa: E501
:rtype: V1VolumeError
"""
return self._attach_error
@attach_error.setter
def attach_error(self, attach_error):
"""Sets the attach_error of this V1VolumeAttachmentStatus.
:param attach_error: The attach_error of this V1VolumeAttachmentStatus. # noqa: E501
:type: V1VolumeError
"""
self._attach_error = attach_error
@property
def attached(self):
"""Gets the attached of this V1VolumeAttachmentStatus. # noqa: E501
attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
:return: The attached of this V1VolumeAttachmentStatus. # noqa: E501
:rtype: bool
"""
return self._attached
@attached.setter
def attached(self, attached):
"""Sets the attached of this V1VolumeAttachmentStatus.
attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
:param attached: The attached of this V1VolumeAttachmentStatus. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and attached is None: # noqa: E501
raise ValueError("Invalid value for `attached`, must not be `None`") # noqa: E501
self._attached = attached
@property
def attachment_metadata(self):
"""Gets the attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
:return: The attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._attachment_metadata
@attachment_metadata.setter
def attachment_metadata(self, attachment_metadata):
"""Sets the attachment_metadata of this V1VolumeAttachmentStatus.
attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
:param attachment_metadata: The attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
:type: dict(str, str)
"""
self._attachment_metadata = attachment_metadata
@property
def detach_error(self):
"""Gets the detach_error of this V1VolumeAttachmentStatus. # noqa: E501
:return: The detach_error of this V1VolumeAttachmentStatus. # noqa: E501
:rtype: V1VolumeError
"""
return self._detach_error
@detach_error.setter
def detach_error(self, detach_error):
"""Sets the detach_error of this V1VolumeAttachmentStatus.
:param detach_error: The detach_error of this V1VolumeAttachmentStatus. # noqa: E501
:type: V1VolumeError
"""
self._detach_error = detach_error
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VolumeAttachmentStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1VolumeAttachmentStatus):
return True
return self.to_dict() != other.to_dict()
| V1VolumeAttachmentStatus |
python | ray-project__ray | python/ray/data/preprocessors/serialization_handlers.py | {
"start": 435,
"end": 597
} | class ____(Enum):
"""Enum for consistent format naming in the factory."""
CLOUDPICKLE = "cloudpickle"
PICKLE = "pickle"
@DeveloperAPI
| HandlerFormatName |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_liblinear.py | {
"start": 435,
"end": 2257
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
with ignore_warnings(feature_preprocessing_warnings):
transformation, original = _test_preprocessing(LibLinear_Preprocessor)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_classify(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="digits", make_sparse=False
)
configuration_space = (
LibLinear_Preprocessor.get_hyperparameter_search_space()
)
default = configuration_space.get_default_configuration()
preprocessor = LibLinear_Preprocessor(
random_state=1,
**{
hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None
},
)
with ignore_warnings(feature_preprocessing_warnings):
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = RidgeClassifier()
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)
self.assertAlmostEqual(accuracy, 0.8548876745598057, places=2)
def test_preprocessing_dtype(self):
with ignore_warnings(feature_preprocessing_warnings):
super()._test_preprocessing_dtype(LibLinear_Preprocessor, test_sparse=False)
| LiblinearComponentTest |
python | spyder-ide__spyder | spyder/plugins/layout/widgets/dialog.py | {
"start": 933,
"end": 4387
} | class ____(QAbstractTableModel):
""" """
def __init__(self, parent, names, ui_names, order, active, read_only):
super().__init__(parent)
# variables
self._parent = parent
self.names = names
self.ui_names = ui_names
self.order = order
self.active = active
self.read_only = read_only
self._rows = []
self.set_data(names, ui_names, order, active, read_only)
def set_data(self, names, ui_names, order, active, read_only):
""" """
self._rows = []
self.names = names
self.ui_names = ui_names
self.order = order
self.active = active
self.read_only = read_only
for name in order:
index = names.index(name)
if name in active:
row = [ui_names[index], name, True]
else:
row = [ui_names[index], name, False]
self._rows.append(row)
def flags(self, index):
"""Override Qt method"""
row = index.row()
ui_name, name, state = self.row(row)
if name in self.read_only:
return Qt.ItemFlag(0)
if not index.isValid():
return Qt.ItemFlag.ItemIsEnabled
column = index.column()
if column in [0]:
return (Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable |
Qt.ItemFlag.ItemIsUserCheckable |
Qt.ItemFlag.ItemIsEditable)
else:
return Qt.ItemFlag.ItemIsEnabled
def data(self, index, role=Qt.DisplayRole):
"""Override Qt method"""
if not index.isValid() or not 0 <= index.row() < len(self._rows):
return to_qvariant()
row = index.row()
column = index.column()
ui_name, name, state = self.row(row)
if role == Qt.DisplayRole or role == Qt.EditRole:
if column == 0:
return to_qvariant(ui_name)
elif role == Qt.UserRole:
if column == 0:
return to_qvariant(name)
elif role == Qt.CheckStateRole:
if column == 0:
if state:
return Qt.Checked
else:
return Qt.Unchecked
if column == 1:
return to_qvariant(state)
return to_qvariant()
def setData(self, index, value, role):
"""Override Qt method"""
row = index.row()
ui_name, name, state = self.row(row)
if role == Qt.CheckStateRole:
self.set_row(row, [ui_name, name, not state])
self._parent.setCurrentIndex(index)
self._parent.setFocus()
self.dataChanged.emit(index, index)
return True
elif role == Qt.EditRole:
self.set_row(row, [from_qvariant(value, str), name, state])
self.dataChanged.emit(index, index)
return True
return True
def rowCount(self, index=QModelIndex()):
"""Override Qt method"""
return len(self._rows)
def columnCount(self, index=QModelIndex()):
"""Override Qt method"""
return 2
def row(self, rownum):
""" """
if self._rows == [] or rownum >= len(self._rows):
return [None, None, None]
else:
return self._rows[rownum]
def set_row(self, rownum, value):
""" """
self._rows[rownum] = value
| LayoutModel |
python | pypa__pip | src/pip/_internal/index/collector.py | {
"start": 12546,
"end": 12672
} | class ____(NamedTuple):
find_links: Sequence[LinkSource | None]
index_urls: Sequence[LinkSource | None]
| CollectedSources |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 40991,
"end": 41761
} | class ____:
"""Combines the horizontal and vertical alignment properties into a single property."""
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.horizontal = f"{name}_horizontal"
self.vertical = f"{name}_vertical"
def __get__(
self, obj: StylesBase, type: type[StylesBase]
) -> tuple[AlignHorizontal, AlignVertical]:
horizontal = getattr(obj, self.horizontal)
vertical = getattr(obj, self.vertical)
return (horizontal, vertical)
def __set__(
self, obj: StylesBase, value: tuple[AlignHorizontal, AlignVertical]
) -> None:
horizontal, vertical = value
setattr(obj, self.horizontal, horizontal)
setattr(obj, self.vertical, vertical)
| AlignProperty |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 19410,
"end": 19619
} | class ____(Token, Expr):
__slots__ = _fields = ('array', 'dim', 'mask')
defaults = {'dim': none, 'mask': none}
_construct_array = staticmethod(sympify)
_construct_dim = staticmethod(sympify)
| sum_ |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/loop34.py | {
"start": 297,
"end": 768
} | class ____(Protocol[_T_contra]):
def __lt__(self, __other: _T_contra) -> bool: ...
SupportsRichComparison: TypeAlias = SupportsDunderLT[Any] | SupportsDunderGT[Any]
SupportsRichComparisonT = TypeVar(
"SupportsRichComparisonT", bound=SupportsRichComparison
)
def max(
__arg1: SupportsRichComparisonT, __arg2: SupportsRichComparisonT
) -> SupportsRichComparisonT: ...
a: int = 1
while True:
while a >= 0:
a -= 1
a = max(0, a)
| SupportsDunderLT |
python | kamyu104__LeetCode-Solutions | Python/count-all-valid-pickup-and-delivery-options.py | {
"start": 29,
"end": 306
} | class ____(object):
def countOrders(self, n):
"""
:type n: int
:rtype: int
"""
MOD = 10**9+7
result = 1
for i in reversed(xrange(2, 2*n+1, 2)):
result = result * i*(i-1)//2 % MOD
return result
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/classes/tenants.py | {
"start": 4476,
"end": 5271
} | class ____(str, Enum):
"""TenantActivityStatus class used to describe the activity status of a tenant to update in Weaviate.
Attributes:
ACTIVE: The tenant is fully active and can be used.
INACTIVE: The tenant is not active, files stored locally.
OFFLOADED: The tenant is not active, files stored on the cloud.
HOT: DEPRECATED, please use ACTIVE. The tenant is fully active and can be used.
COLD: DEPRECATED, please use INACTIVE. The tenant is not active, files stored locally.
FROZEN: DEPRECATED, please use OFFLOADED. The tenant is not active, files stored on the cloud.
"""
ACTIVE = "ACTIVE"
INACTIVE = "INACTIVE"
OFFLOADED = "OFFLOADED"
HOT = "HOT"
COLD = "COLD"
FROZEN = "FROZEN"
| TenantUpdateActivityStatus |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/exceptions.py | {
"start": 12938,
"end": 13616
} | class ____(InstallationError):
"""Multiple HashError instances rolled into one for reporting"""
def __init__(self) -> None:
self.errors: List["HashError"] = []
def append(self, error: "HashError") -> None:
self.errors.append(error)
def __str__(self) -> str:
lines = []
self.errors.sort(key=lambda e: e.order)
for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
lines.append(cls.head)
lines.extend(e.body() for e in errors_of_cls)
if lines:
return "\n".join(lines)
return ""
def __bool__(self) -> bool:
return bool(self.errors)
| HashErrors |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/missingSuper1.py | {
"start": 328,
"end": 436
} | class ____:
def __init__(self):
pass
def __init_subclass__(cls) -> None:
pass
| ParentD |
python | joke2k__faker | faker/providers/company/az_AZ/__init__.py | {
"start": 45,
"end": 1243
} | class ____(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{large_company}}",
)
large_companies = (
"AZAL",
"Azergold",
"SOCAR",
"Socar Polymer",
"Global Export Fruits",
"Baku Steel Company",
"Azersun",
"Sun Food",
"Azərbaycan Şəkər İstehsalat Birliyi",
"Azərsu",
"Xəzər Dəniz Gəmiçiliyi",
"Azərenerji",
"Bakıelektrikşəbəkə",
"Azəralüminium",
"Bravo",
"Azərpambıq Aqrar Sənaye Kompleksi",
"CTS-Agro",
"Azərtütün Aqrar Sənaye Kompleksi",
"Azəripək",
"Azfruittrade",
"AF Holding",
"Azinko Holding",
"Gilan Holding",
"Azpetrol",
"Azərtexnolayn",
"Bakı Gəmiqayırma Zavodu",
"Gəncə Tekstil Fabriki",
"Mətanət A",
"İrşad Electronics",
)
company_suffixes = (
"ASC",
"QSC",
"MMC",
)
def large_company(self) -> str:
"""
:example: 'SOCAR'
"""
return self.random_element(self.large_companies)
| Provider |
python | openai__openai-python | src/openai/cli/_api/audio.py | {
"start": 1541,
"end": 1758
} | class ____(BaseModel):
model: str
file: str
response_format: Optional[str] = None
language: Optional[str] = None
temperature: Optional[float] = None
prompt: Optional[str] = None
| CLITranscribeArgs |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs_feed.py | {
"start": 1825,
"end": 2068
} | class ____(graphene.Union):
class Meta:
types = (GrapheneRunsFeedCount, GraphenePythonError)
name = "RunsFeedCountOrError"
types = [GrapheneRunsFeedConnectionOrError, GrapheneRunsFeedCountOrError]
| GrapheneRunsFeedCountOrError |
python | anthropics__anthropic-sdk-python | src/anthropic/types/signature_delta.py | {
"start": 190,
"end": 280
} | class ____(BaseModel):
signature: str
type: Literal["signature_delta"]
| SignatureDelta |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 2306,
"end": 2412
} | class ____(Article):
quality = models.IntegerField()
class Meta:
abstract = True
| Evaluation |
python | pypa__pipenv | pipenv/vendor/pythonfinder/pythonfinder.py | {
"start": 404,
"end": 7433
} | class ____:
"""
Main finder class that orchestrates all the finders.
"""
def __init__(
self,
path: str | None = None,
system: bool = False,
global_search: bool = True,
ignore_unsupported: bool = True,
sort_by_path: bool = False,
):
"""
Initialize a new Finder.
Args:
path: Path to prepend to the search path.
system: Whether to include the system Python.
global_search: Whether to search in the system PATH.
ignore_unsupported: Whether to ignore unsupported Python versions.
"""
self.path = path
self.system = system
self.global_search = global_search
self.ignore_unsupported = ignore_unsupported
self.sort_by_path = sort_by_path
# Initialize finders
self.system_finder = SystemFinder(
paths=[path] if path else None,
global_search=global_search,
system=system,
ignore_unsupported=ignore_unsupported,
)
self.pyenv_finder = PyenvFinder(
ignore_unsupported=ignore_unsupported,
)
self.asdf_finder = AsdfFinder(
ignore_unsupported=ignore_unsupported,
)
# Initialize Windows-specific finders if on Windows
self.py_launcher_finder = None
self.windows_finder = None
if os.name == "nt":
self.py_launcher_finder = PyLauncherFinder(
ignore_unsupported=ignore_unsupported,
)
self.windows_finder = WindowsRegistryFinder(
ignore_unsupported=ignore_unsupported,
)
# List of all finders
self.finders: list[BaseFinder] = [
self.pyenv_finder,
self.asdf_finder,
]
# Add Windows-specific finders if on Windows
if self.py_launcher_finder:
self.finders.append(self.py_launcher_finder)
if self.windows_finder:
self.finders.append(self.windows_finder)
# Add system finder last
self.finders.append(self.system_finder)
def which(self, executable: str) -> Path | None:
"""
Find an executable in the paths searched by this finder.
Args:
executable: The name of the executable to find.
Returns:
The path to the executable, or None if not found.
"""
for finder in self.finders:
path = finder.which(executable)
if path:
return path
return None
def find_python_version(
self,
major: str | int | None = None,
minor: int | None = None,
patch: int | None = None,
pre: bool | None = None,
dev: bool | None = None,
arch: str | None = None,
name: str | None = None,
) -> PythonInfo | None:
"""
Find a Python version matching the specified criteria.
Args:
major: Major version number or full version string.
minor: Minor version number.
patch: Patch version number.
pre: Whether to include pre-releases.
dev: Whether to include dev-releases.
arch: Architecture to include, e.g. '64bit'.
name: The name of a python version, e.g. ``anaconda3-5.3.0``.
Returns:
A PythonInfo object matching the criteria, or None if not found.
"""
# Parse the major version if it's a string
if isinstance(major, str) and not any([minor, patch, pre, dev, arch]):
for finder in self.finders:
version_dict = finder.parse_major(major, minor, patch, pre, dev, arch)
if version_dict.get("name") and not name:
name = version_dict.get("name")
major = version_dict.get("major")
minor = version_dict.get("minor")
patch = version_dict.get("patch")
pre = version_dict.get("is_prerelease")
dev = version_dict.get("is_devrelease")
arch = version_dict.get("arch")
break
# Try to find the Python version in each finder
for finder in self.finders:
python_version = finder.find_python_version(
major, minor, patch, pre, dev, arch, name
)
if python_version:
return python_version
return None
def find_all_python_versions(
self,
major: str | int | None = None,
minor: int | None = None,
patch: int | None = None,
pre: bool | None = None,
dev: bool | None = None,
arch: str | None = None,
name: str | None = None,
) -> list[PythonInfo]:
"""
Find all Python versions matching the specified criteria.
Args:
major: Major version number or full version string.
minor: Minor version number.
patch: Patch version number.
pre: Whether to include pre-releases.
dev: Whether to include dev-releases.
arch: Architecture to include, e.g. '64bit'.
name: The name of a python version, e.g. ``anaconda3-5.3.0``.
Returns:
A list of PythonInfo objects matching the criteria.
"""
# Parse the major version if it's a string
if isinstance(major, str) and not any([minor, patch, pre, dev, arch]):
for finder in self.finders:
version_dict = finder.parse_major(major, minor, patch, pre, dev, arch)
if version_dict.get("name") and not name:
name = version_dict.get("name")
major = version_dict.get("major")
minor = version_dict.get("minor")
patch = version_dict.get("patch")
pre = version_dict.get("is_prerelease")
dev = version_dict.get("is_devrelease")
arch = version_dict.get("arch")
break
# Find all Python versions in each finder
python_versions = []
for finder in self.finders:
python_versions.extend(
finder.find_all_python_versions(major, minor, patch, pre, dev, arch, name)
)
# Sort by version and remove duplicates
seen_paths = set()
unique_versions = []
# Choose the sort key based on sort_by_path
if self.sort_by_path:
def sort_key(x):
return x.path, x.version_sort
else:
def sort_key(x):
return x.version_sort
for version in sorted(
python_versions, key=sort_key, reverse=not self.sort_by_path
):
if version.path not in seen_paths:
seen_paths.add(version.path)
unique_versions.append(version)
return unique_versions
| Finder |
python | getsentry__sentry | src/sentry/analytics/events/first_user_context_sent.py | {
"start": 80,
"end": 240
} | class ____(analytics.Event):
user_id: int
organization_id: int
project_id: int
analytics.register(FirstUserContextSentEvent)
| FirstUserContextSentEvent |
python | ethereum__web3.py | ens/exceptions.py | {
"start": 1523,
"end": 1645
} | class ____(ENSException):
"""
Raised if a resolver does not support a particular method.
"""
| UnsupportedFunction |
python | PyCQA__pylint | pylint/extensions/code_style.py | {
"start": 579,
"end": 14530
} | class ____(BaseChecker):
"""Checkers that can improve code consistency.
As such they don't necessarily provide a performance benefit and
are often times opinionated.
Before adding another checker here, consider this:
1. Does the checker provide a clear benefit,
i.e. detect a common issue or improve performance
=> it should probably be part of the core checker classes
2. Is it something that would improve code consistency,
maybe because it's slightly better with regard to performance
and therefore preferred => this is the right place
3. Everything else should go into another extension
"""
name = "code_style"
msgs = {
"R6101": (
"Consider using namedtuple or dataclass for dictionary values",
"consider-using-namedtuple-or-dataclass",
"Emitted when dictionary values can be replaced by namedtuples or dataclass instances.",
),
"R6102": (
"Consider using an in-place tuple instead of list",
"consider-using-tuple",
"Only for style consistency! "
"Emitted where an in-place defined ``list`` can be replaced by a ``tuple``. "
"Due to optimizations by CPython, there is no performance benefit from it.",
),
"R6103": (
"Use '%s' instead",
"consider-using-assignment-expr",
"Emitted when an if assignment is directly followed by an if statement and "
"both can be combined by using an assignment expression ``:=``. "
"Requires Python 3.8 and ``py-version >= 3.8``.",
),
"R6104": (
"Use '%s' to do an augmented assign directly",
"consider-using-augmented-assign",
"Emitted when an assignment is referring to the object that it is assigning "
"to. This can be changed to be an augmented assign.\n"
"Disabled by default!",
{
"default_enabled": False,
},
),
"R6105": (
"Prefer 'typing.NamedTuple' over 'collections.namedtuple'",
"prefer-typing-namedtuple",
"'typing.NamedTuple' uses the well-known 'class' keyword "
"with type-hints for readability (it's also faster as it avoids "
"an internal exec call).\n"
"Disabled by default!",
{
"default_enabled": False,
},
),
"R6106": (
"Consider %smath.%s instead of %s",
"consider-math-not-float",
"Using math.inf or math.nan permits to benefit from typing and it is up "
"to 4 times faster than a float call (after the initial import of math). "
"This check also catches typos in float calls as a side effect.",
),
}
options = (
(
"max-line-length-suggestions",
{
"type": "int",
"default": 0,
"metavar": "<int>",
"help": (
"Max line length for which to sill emit suggestions. "
"Used to prevent optional suggestions which would get split "
"by a code formatter (e.g., black). "
"Will default to the setting for ``max-line-length``."
),
},
),
)
def open(self) -> None:
py_version = self.linter.config.py_version
self._py36_plus = py_version >= (3, 6)
self._py38_plus = py_version >= (3, 8)
self._max_length: int = (
self.linter.config.max_line_length_suggestions
or self.linter.config.max_line_length
)
@only_required_for_messages("prefer-typing-namedtuple", "consider-math-not-float")
def visit_call(self, node: nodes.Call) -> None:
if self._py36_plus:
called = safe_infer(node.func)
if not (called and isinstance(called, (nodes.FunctionDef, nodes.ClassDef))):
return
if called.qname() == "collections.namedtuple":
self.add_message(
"prefer-typing-namedtuple", node=node, confidence=INFERENCE
)
elif called.qname() == "builtins.float":
if (
node.args
and isinstance(node.args[0], nodes.Const)
and isinstance(node.args[0].value, str)
and any(
c.isalpha() and c.lower() != "e" for c in node.args[0].value
)
):
value = node.args[0].value.lower()
math_call: str
if "nan" in value:
math_call = "nan"
elif "inf" in value:
math_call = "inf"
else:
math_call = difflib.get_close_matches(
value, ["inf", "nan"], n=1, cutoff=0
)[0]
minus = "-" if math_call == "inf" and value.startswith("-") else ""
self.add_message(
"consider-math-not-float",
node=node,
args=(minus, math_call, node.as_string()),
confidence=INFERENCE,
)
@only_required_for_messages("consider-using-namedtuple-or-dataclass")
def visit_dict(self, node: nodes.Dict) -> None:
self._check_dict_consider_namedtuple_dataclass(node)
@only_required_for_messages("consider-using-tuple")
def visit_for(self, node: nodes.For) -> None:
if isinstance(node.iter, nodes.List):
self.add_message("consider-using-tuple", node=node.iter)
@only_required_for_messages("consider-using-tuple")
def visit_comprehension(self, node: nodes.Comprehension) -> None:
if isinstance(node.iter, nodes.List):
self.add_message("consider-using-tuple", node=node.iter)
@only_required_for_messages("consider-using-assignment-expr")
def visit_if(self, node: nodes.If) -> None:
if self._py38_plus:
self._check_consider_using_assignment_expr(node)
def _check_dict_consider_namedtuple_dataclass(self, node: nodes.Dict) -> None:
"""Check if dictionary values can be replaced by Namedtuple or Dataclass."""
if not (
(
isinstance(node.parent, (nodes.Assign, nodes.AnnAssign))
and isinstance(node.parent.parent, nodes.Module)
)
or (
isinstance(node.parent, nodes.AnnAssign)
and isinstance(node.parent.target, nodes.AssignName)
and utils.is_assign_name_annotated_with(node.parent.target, "Final")
)
):
# If dict is not part of an 'Assign' or 'AnnAssign' node in
# a module context OR 'AnnAssign' with 'Final' annotation, skip check.
return
# All dict_values are itself dict nodes
if len(node.items) > 1 and all(
isinstance(dict_value, nodes.Dict) for _, dict_value in node.items
):
KeyTupleT = tuple[type[nodes.NodeNG], str]
# Makes sure all keys are 'Const' string nodes
keys_checked: set[KeyTupleT] = set()
for _, dict_value in node.items:
dict_value = cast(nodes.Dict, dict_value)
for key, _ in dict_value.items:
key_tuple = (type(key), key.as_string())
if key_tuple in keys_checked:
continue
inferred = safe_infer(key)
if not (
isinstance(inferred, nodes.Const)
and inferred.pytype() == "builtins.str"
):
return
keys_checked.add(key_tuple)
# Makes sure all subdicts have at least 1 common key
key_tuples: list[tuple[KeyTupleT, ...]] = []
for _, dict_value in node.items:
dict_value = cast(nodes.Dict, dict_value)
key_tuples.append(
tuple((type(key), key.as_string()) for key, _ in dict_value.items)
)
keys_intersection: set[KeyTupleT] = set(key_tuples[0])
for sub_key_tuples in key_tuples[1:]:
keys_intersection.intersection_update(sub_key_tuples)
if not keys_intersection:
return
self.add_message("consider-using-namedtuple-or-dataclass", node=node)
return
# All dict_values are itself either list or tuple nodes
if len(node.items) > 1 and all(
isinstance(dict_value, (nodes.List, nodes.Tuple))
for _, dict_value in node.items
):
# Make sure all sublists have the same length > 0
list_length = len(node.items[0][1].elts)
if list_length == 0:
return
for _, dict_value in node.items[1:]:
if len(dict_value.elts) != list_length:
return
# Make sure at least one list entry isn't a dict
for _, dict_value in node.items:
if all(isinstance(entry, nodes.Dict) for entry in dict_value.elts):
return
self.add_message("consider-using-namedtuple-or-dataclass", node=node)
return
def _check_consider_using_assignment_expr(self, node: nodes.If) -> None:
"""Check if an assignment expression (walrus operator) can be used.
For example if an assignment is directly followed by an if statement:
>>> x = 2
>>> if x:
>>> ...
Can be replaced by:
>>> if (x := 2):
>>> ...
Note: Assignment expressions were added in Python 3.8
"""
# Check if `node.test` contains a `Name` node
match node.test:
case (
(nodes.Name() as node_name)
| nodes.UnaryOp(op="not", operand=nodes.Name() as node_name)
| nodes.Compare(left=nodes.Name() as node_name, ops=[_])
):
pass
case _:
return
# Make sure the previous node is an assignment to the same name
# used in `node.test`. Furthermore, ignore if assignment spans multiple lines.
prev_sibling = node.previous_sibling()
if CodeStyleChecker._check_prev_sibling_to_if_stmt(
prev_sibling, node_name.name
):
# Check if match statement would be a better fit.
# I.e. multiple ifs that test the same name.
if CodeStyleChecker._check_ignore_assignment_expr_suggestion(
node, node_name.name
):
return
# Build suggestion string. Check length of suggestion
# does not exceed max-line-length-suggestions
test_str = node.test.as_string().replace(
node_name.name,
f"({node_name.name} := {prev_sibling.value.as_string()})",
1,
)
suggestion = f"if {test_str}:"
if (
node.col_offset is not None
and len(suggestion) + node.col_offset > self._max_length
) or len(suggestion) > self._max_length:
return
self.add_message(
"consider-using-assignment-expr",
node=node_name,
args=(suggestion,),
)
@staticmethod
def _check_prev_sibling_to_if_stmt(
prev_sibling: nodes.NodeNG | None, name: str | None
) -> TypeGuard[nodes.Assign | nodes.AnnAssign]:
"""Check if previous sibling is an assignment with the same name.
Ignore statements which span multiple lines.
"""
if prev_sibling is None or prev_sibling.tolineno - prev_sibling.fromlineno != 0:
return False
match prev_sibling:
case nodes.Assign(
targets=[nodes.AssignName(name=target_name)]
) | nodes.AnnAssign(target=nodes.AssignName(name=target_name)):
return target_name == name and prev_sibling.value is not None
return False
@staticmethod
def _check_ignore_assignment_expr_suggestion(
node: nodes.If, name: str | None
) -> bool:
"""Return True if suggestion for assignment expr should be ignored.
E.g., in cases where a match statement would be a better fit
(multiple conditions).
"""
if isinstance(node.test, nodes.Compare):
next_if_node: nodes.If | None = None
next_sibling = node.next_sibling()
if len(node.orelse) == 1 and isinstance(node.orelse[0], nodes.If):
# elif block
next_if_node = node.orelse[0]
elif isinstance(next_sibling, nodes.If):
# separate if block
next_if_node = next_sibling
match next_if_node:
case nodes.If(
test=nodes.Compare(left=nodes.Name(name=n)) | nodes.Name(name=n)
) if (n == name):
return True
return False
@only_required_for_messages("consider-using-augmented-assign")
def visit_assign(self, node: nodes.Assign) -> None:
is_aug, op = utils.is_augmented_assign(node)
if is_aug:
self.add_message(
"consider-using-augmented-assign",
args=f"{op}=",
node=node,
line=node.lineno,
col_offset=node.col_offset,
confidence=INFERENCE,
)
def register(linter: PyLinter) -> None:
linter.register_checker(CodeStyleChecker(linter))
| CodeStyleChecker |
python | django__django | tests/model_forms/models.py | {
"start": 13153,
"end": 13328
} | class ____(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=30)
# Models for #24706
| UUIDPK |
python | ray-project__ray | python/ray/data/_internal/execution/interfaces/ref_bundle.py | {
"start": 430,
"end": 741
} | class ____:
"""A slice of a block."""
# Starting row offset (inclusive) within the block.
start_offset: int
# Ending row offset (exclusive) within the block.
end_offset: int
@property
def num_rows(self) -> int:
return self.end_offset - self.start_offset
@dataclass
| BlockSlice |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 582,
"end": 1052
} | class ____(TD1[_T]):
e: _T
f: ReadOnly[str]
td1: TD1[float] = {"a": 3, "b": "", "c": [], "d": {}, "e": 0.0}
reveal_type(td1.get("a"), expected_text="int")
reveal_type(td1.get("b"), expected_text="str")
reveal_type(td1.get("c"), expected_text="list[str]")
reveal_type(td1.get("d"), expected_text="dict[str, str]")
reveal_type(td1.get("e"), expected_text="float")
td2: TD2[float] = {"a": 3, "b": "", "c": [], "d": {}, "e": 0.0, "f": ""}
x1: TD1[float] = td2
| TD2 |
python | realpython__materials | python-protocol/birds_v2.py | {
"start": 208,
"end": 451
} | class ____(QuackingThing):
def quack(self):
return "The person is imitating a duck quacking!"
def make_it_quack(duck: QuackingThing) -> str:
return duck.quack()
print(make_it_quack(Duck()))
print(make_it_quack(Person()))
| Person |
python | PrefectHQ__prefect | src/prefect/server/orchestration/dependencies.py | {
"start": 565,
"end": 4671
} | class ____(TypedDict):
task_policy_provider: TaskRunPolicyProvider | None
flow_policy_provider: FlowRunPolicyProvider | None
task_orchestration_parameters_provider: ParameterProvider | None
flow_orchestration_parameters_provider: ParameterProvider | None
ORCHESTRATION_DEPENDENCIES: OrchestrationDependencies = {
"task_policy_provider": None,
"flow_policy_provider": None,
"task_orchestration_parameters_provider": None,
"flow_orchestration_parameters_provider": None,
}
WORKER_VERSIONS_THAT_MANAGE_DEPLOYMENT_CONCURRENCY = {
"3.0.0rc20",
"3.0.0",
"3.0.1",
"3.0.2",
"3.0.3",
}
MIN_CLIENT_VERSION_FOR_CONCURRENCY_LIMIT_LEASING = Version("3.4.11")
async def provide_task_policy() -> type[TaskRunOrchestrationPolicy]:
policy_provider = ORCHESTRATION_DEPENDENCIES.get("task_policy_provider")
if policy_provider is None:
from prefect.server.orchestration.core_policy import CoreTaskPolicy
return CoreTaskPolicy
return await policy_provider()
async def provide_flow_policy() -> type[FlowRunOrchestrationPolicy]:
policy_provider = ORCHESTRATION_DEPENDENCIES.get("flow_policy_provider")
if policy_provider is None:
from prefect.server.orchestration.core_policy import (
CoreFlowPolicy,
)
return CoreFlowPolicy
return await policy_provider()
async def provide_task_orchestration_parameters() -> dict[str, Any]:
parameter_provider = ORCHESTRATION_DEPENDENCIES.get(
"task_orchestration_parameters_provider"
)
if parameter_provider is None:
return cast(dict[str, Any], dict())
return await parameter_provider()
async def provide_flow_orchestration_parameters() -> dict[str, Any]:
parameter_provider = ORCHESTRATION_DEPENDENCIES.get(
"flow_orchestration_parameters_provider"
)
if parameter_provider is None:
return cast(dict[str, Any], dict())
return await parameter_provider()
@contextmanager
def temporary_task_policy(tmp_task_policy: type[TaskRunOrchestrationPolicy]):
starting_task_policy = ORCHESTRATION_DEPENDENCIES["task_policy_provider"]
async def policy_lambda():
return tmp_task_policy
try:
ORCHESTRATION_DEPENDENCIES["task_policy_provider"] = policy_lambda
yield
finally:
ORCHESTRATION_DEPENDENCIES["task_policy_provider"] = starting_task_policy
@contextmanager
def temporary_flow_policy(tmp_flow_policy: type[FlowRunOrchestrationPolicy]):
starting_flow_policy = ORCHESTRATION_DEPENDENCIES["flow_policy_provider"]
async def policy_lambda():
return tmp_flow_policy
try:
ORCHESTRATION_DEPENDENCIES["flow_policy_provider"] = policy_lambda
yield
finally:
ORCHESTRATION_DEPENDENCIES["flow_policy_provider"] = starting_flow_policy
@contextmanager
def temporary_task_orchestration_parameters(
tmp_orchestration_parameters: dict[str, Any],
):
starting_task_orchestration_parameters = ORCHESTRATION_DEPENDENCIES[
"task_orchestration_parameters_provider"
]
async def parameter_lambda():
return tmp_orchestration_parameters
try:
ORCHESTRATION_DEPENDENCIES["task_orchestration_parameters_provider"] = (
parameter_lambda
)
yield
finally:
ORCHESTRATION_DEPENDENCIES["task_orchestration_parameters_provider"] = (
starting_task_orchestration_parameters
)
@contextmanager
def temporary_flow_orchestration_parameters(
tmp_orchestration_parameters: dict[str, Any],
):
starting_flow_orchestration_parameters = ORCHESTRATION_DEPENDENCIES[
"flow_orchestration_parameters_provider"
]
async def parameter_lambda():
return tmp_orchestration_parameters
try:
ORCHESTRATION_DEPENDENCIES["flow_orchestration_parameters_provider"] = (
parameter_lambda
)
yield
finally:
ORCHESTRATION_DEPENDENCIES["flow_orchestration_parameters_provider"] = (
starting_flow_orchestration_parameters
)
| OrchestrationDependencies |
python | getsentry__sentry | tests/sentry/api/test_paginator.py | {
"start": 15591,
"end": 20261
} | class ____(SimpleTestCase):
def test_empty_results(self) -> None:
paginator: SequencePaginator[None] = SequencePaginator([])
result = paginator.get_result(5)
assert list(result) == []
assert result.prev == Cursor(0, 0, True, False)
assert result.next == Cursor(0, 0, False, False)
paginator = SequencePaginator([], reverse=True)
result = paginator.get_result(5)
assert list(result) == []
assert result.prev == Cursor(0, 0, True, False)
assert result.next == Cursor(0, 0, False, False)
def test_ascending_simple(self) -> None:
paginator = SequencePaginator([(i, i) for i in range(10)], reverse=False)
result = paginator.get_result(5)
assert list(result) == [0, 1, 2, 3, 4]
assert result.prev == Cursor(0, 0, True, False)
assert result.next == Cursor(5, 0, False, True)
result = paginator.get_result(5, result.next)
assert list(result) == [5, 6, 7, 8, 9]
assert result.prev == Cursor(5, 0, True, True)
assert result.next == Cursor(9, 1, False, False)
result = paginator.get_result(5, result.prev)
assert list(result) == [0, 1, 2, 3, 4]
assert result.prev == Cursor(0, 0, True, False)
assert result.next == Cursor(5, 0, False, True)
result = paginator.get_result(5, Cursor(100, 0, False))
assert list(result) == []
assert result.prev == Cursor(9, 1, True, True)
assert result.next == Cursor(9, 1, False, False)
def test_descending_simple(self) -> None:
paginator = SequencePaginator([(i, i) for i in range(10)], reverse=True)
result = paginator.get_result(5)
assert list(result) == [9, 8, 7, 6, 5]
assert result.prev == Cursor(9, 0, True, False)
assert result.next == Cursor(4, 0, False, True)
result = paginator.get_result(5, result.next)
assert list(result) == [4, 3, 2, 1, 0]
assert result.prev == Cursor(4, 0, True, True)
assert result.next == Cursor(0, 1, False, False)
result = paginator.get_result(5, result.prev)
assert list(result) == [9, 8, 7, 6, 5]
assert result.prev == Cursor(9, 0, True, False)
assert result.next == Cursor(4, 0, False, True)
result = paginator.get_result(5, Cursor(-10, 0, False))
assert list(result) == []
assert result.prev == Cursor(0, 1, True, True)
assert result.next == Cursor(0, 1, False, False)
def test_ascending_repeated_scores(self) -> None:
paginator = SequencePaginator([(1, i) for i in range(10)], reverse=False)
result = paginator.get_result(5)
assert list(result) == [0, 1, 2, 3, 4]
assert result.prev == Cursor(1, 0, True, False)
assert result.next == Cursor(1, 5, False, True)
result = paginator.get_result(5, result.next)
assert list(result) == [5, 6, 7, 8, 9]
assert result.prev == Cursor(1, 5, True, True)
assert result.next == Cursor(1, 10, False, False)
result = paginator.get_result(5, result.prev)
assert list(result) == [0, 1, 2, 3, 4]
assert result.prev == Cursor(1, 0, True, False)
assert result.next == Cursor(1, 5, False, True)
result = paginator.get_result(5, Cursor(100, 0, False))
assert list(result) == []
assert result.prev == Cursor(1, 10, True, True)
assert result.next == Cursor(1, 10, False, False)
def test_descending_repeated_scores(self) -> None:
paginator = SequencePaginator([(1, i) for i in range(10)], reverse=True)
result = paginator.get_result(5)
assert list(result) == [9, 8, 7, 6, 5]
assert result.prev == Cursor(1, 0, True, False)
assert result.next == Cursor(1, 5, False, True)
result = paginator.get_result(5, result.next)
assert list(result) == [4, 3, 2, 1, 0]
assert result.prev == Cursor(1, 5, True, True)
assert result.next == Cursor(1, 10, False, False)
result = paginator.get_result(5, result.prev)
assert list(result) == [9, 8, 7, 6, 5]
assert result.prev == Cursor(1, 0, True, False)
assert result.next == Cursor(1, 5, False, True)
result = paginator.get_result(5, Cursor(-10, 0, False))
assert list(result) == []
assert result.prev == Cursor(1, 10, True, True)
assert result.next == Cursor(1, 10, False, False)
def test_hits(self) -> None:
n = 10
paginator = SequencePaginator([(i, i) for i in range(n)])
assert paginator.get_result(5, count_hits=True).hits == n
| SequencePaginatorTestCase |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/tests/test_specs_secrets_mask.py | {
"start": 11547,
"end": 13185
} | class ____:
"""Tests for _persist_secrets_to_gcs function."""
@pytest.fixture
def mock_bucket(self):
"""Create a mock GCS bucket."""
return Mock(spec=storage.Bucket)
@pytest.fixture
def mock_blob(self):
"""Create a mock GCS blob."""
mock_blob = Mock()
mock_blob.name = f"{REGISTRIES_FOLDER}/{SPECS_SECRETS_MASK_FILE_NAME}"
return mock_blob
@pytest.mark.parametrize(
"secrets_set,expected_yaml_content,description",
[
(set(), {"properties": []}, "empty secrets set"),
({"password"}, {"properties": ["password"]}, "single secret"),
({"password", "api_key", "token"}, {"properties": ["api_key", "password", "token"]}, "multiple secrets sorted"),
({"z_secret", "a_secret", "m_secret"}, {"properties": ["a_secret", "m_secret", "z_secret"]}, "secrets sorted alphabetically"),
],
)
def test_persist_secrets_to_gcs_various_secret_sets(self, mock_bucket, mock_blob, secrets_set, expected_yaml_content, description):
"""Test persistence with different secret set sizes and contents."""
mock_bucket.blob.return_value = mock_blob
_persist_secrets_to_gcs(secrets_set, mock_bucket)
mock_bucket.blob.assert_called_once_with(f"{REGISTRIES_FOLDER}/{SPECS_SECRETS_MASK_FILE_NAME}")
mock_blob.upload_from_string.assert_called_once()
uploaded_content = mock_blob.upload_from_string.call_args[0][0]
parsed_yaml = yaml.safe_load(uploaded_content)
assert parsed_yaml == expected_yaml_content, f"Failed for {description}"
| TestPersistSecretsToGcs |
python | huggingface__transformers | src/transformers/masking_utils.py | {
"start": 27656,
"end": 64904
} | class ____(GeneralInterface):
# Class instance object, so that a call to `register` can be reflected into all other files correctly, even if
# a new instance is created (in order to locally override a given function)
_global_mapping = {
"sdpa": sdpa_mask,
"eager": eager_mask,
"flash_attention_2": flash_attention_mask,
"flash_attention_3": flash_attention_mask,
"flex_attention": flex_attention_mask,
}
# Global AttentionMaskInterface shared by all models which do not need to overwrite any of the existing ones
ALL_MASK_ATTENTION_FUNCTIONS: AttentionMaskInterface = AttentionMaskInterface()
def find_packed_sequence_indices(position_ids: torch.Tensor) -> Optional[torch.Tensor]:
"""
Find the indices of the sequence to which each new query token in the sequence belongs when using packed
tensor format (i.e. several sequences packed in the same batch dimension).
Args:
position_ids (`torch.Tensor`)
A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
Returns:
A 2D tensor where each similar integer indicates that the tokens belong to the same sequence. For example, if we
pack 3 sequences of 2, 3 and 1 tokens respectively along a single batch dim, this will return [[0, 0, 1, 1, 1, 2]].
If the there is only one sequence in each batch item (and we don't compile), then we return `None` indicating
no packed sequences. This is the same as [[0, 0, 0, 0, 0, 0]] for the example above.
"""
# What separate different sequences is when 2 consecutive positions_ids are separated by more than 1. So
# taking the diff (by prepending the first value - 1 to keep correct indexing) and applying cumsum to the result
# gives exactly the sequence indices
# Note that we assume that a single sequence cannot span several batch dimensions, i.e. 1 single sequence
# cannot be part of the end of the first batch dim and the start of the 2nd one for example
first_dummy_value = position_ids[:, :1] - 1 # We just need the diff on this first value to be 1
position_diff = torch.diff(position_ids, prepend=first_dummy_value, dim=-1)
packed_sequence_mask = (position_diff != 1).cumsum(-1)
# Sadly this is a dynamic control flow, so we cannot enable this check on anything compile related
if not is_tracing(packed_sequence_mask) and (packed_sequence_mask[:, -1] == 0).all():
return None
return packed_sequence_mask
def _preprocess_mask_arguments(
config: PreTrainedConfig,
input_embeds: torch.Tensor,
attention_mask: Optional[Union[torch.Tensor, BlockMask]],
cache_position: torch.Tensor,
past_key_values: Optional[Cache],
position_ids: Optional[torch.Tensor],
layer_idx: Optional[int],
) -> tuple[bool, Optional[Union[torch.Tensor, BlockMask]], int, int]:
"""
Perform some common pre-processing of the mask arguments we get from the modeling code. Mostly determine the
key-value length and offsets, and if we should early exit or not.
Args:
config (`PreTrainedConfig`):
The model config.
input_embeds (`torch.Tensor`):
The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
batch size, query length and dtype.
attention_mask (`torch.Tensor`, optional):
The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
It can also be an already prepared 4D mask, in which case it is returned as-is.
cache_position (`torch.Tensor`):
A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
past_key_values (`Cache`, optional):
The past key values, if we use a cache.
position_ids (`torch.Tensor`, optional)
A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
layer_idx (`int`, optional):
If `past_key_values` is not None, this is the layer index of the cache from which to get the key-value
length and offset. Indeed, for hybrid caches, different layers may return different lengths.
Returns:
early_exit (`bool`):
Whether we should early exit mask creation, and return the mask as-is.
attention_mask (`torch.Tensor` or `BlockMask` or `None`):
The attention mask to either return immediately, or to use in downstream mask creation.
packed_sequence_mask (`torch.Tensor`, optional):
In case we detected packed sequence format, this is a tensor where each similar integer indicates that
the tokens belong to the same sequence.
kv_length (`int`):
The size that the key and value states will have during the attention computation.
kv_offset (`int`):
An offset to indicate at which first position the key and values states will refer to.
"""
# If the mask is already 4D, simply return as-is (it was already prepared, or it is custom)
if isinstance(attention_mask, (torch.Tensor, BlockMask)) and len(attention_mask.shape) == 4:
return True, attention_mask, None, None, None
# For TGI/vLLM backends, or other custom attention without equivalent mask creation: we don't need a mask!
# Note: it's not ideal to check the `_global_mapping` attribute instead of the object itself, however otherwise
# full graph dynamo tracing (i.e. torch.export or compile with `fullgraph=True`) will fail on Python<3.11
# with `torch._dynamo.exc.Unsupported: 'inline in skipfiles:Mapping.__contains__ | __contains__, skipped
# according trace_rules.lookup SKIP_DIRS'` -- can be removed when we require Python>=3.11
if config._attn_implementation not in ALL_MASK_ATTENTION_FUNCTIONS._global_mapping:
return True, None, None, None, None
# Move the mask to correct device, and potentially switch dtype for efficiency
if attention_mask is not None and attention_mask.ndim == 2:
attention_mask = attention_mask.to(device=cache_position.device, dtype=torch.bool)
# If using a cache, it can give all information about mask sizes based on seen tokens
if past_key_values is not None:
kv_length, kv_offset = past_key_values.get_mask_sizes(cache_position, layer_idx)
# Otherwise, we infer based on our input
else:
# 1. Rely on input directly
if attention_mask is None:
kv_length, kv_offset = input_embeds.shape[1], 0
# 2. Rely on the mask instead - needed for special cases like prefix tuning in PEFT
#
# This is a very unique and special case where an encoder utilizes a cache and expects its length
# to be accounted for (usually, they should never use a cache). In general, the mask should always
# match with the input sizes nonetheless (i.e. it does not affect others).
# Conclusion: "prefix tuning is evil"
else:
kv_length, kv_offset = attention_mask.shape[-1], 0
# We check the position_ids for potential packed sequence format (only if the 2D attention mask is explicitly None,
# and we don't have past_key_values, i.e. generally a training setup)
packed_sequence_mask = None
if position_ids is not None and attention_mask is None and past_key_values is None:
batch_size = input_embeds.shape[0]
# The position ids are sometimes just unsqueezed, without being expanded
if batch_size != position_ids.shape[0]:
position_ids = position_ids.expand(batch_size, -1)
packed_sequence_mask = find_packed_sequence_indices(position_ids)
return False, attention_mask, packed_sequence_mask, kv_length, kv_offset
def create_causal_mask(
    config: PreTrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a standard causal mask based on the attention implementation used (stored in the config). If `past_key_values`
    has an hybrid cache structure, this function will return the mask corresponding to one of the "full_attention" layers (to align
    to what is needed in the `modeling_xxx.py` files).

    Args:
        config (`PreTrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the causal mask function (by doing the union of both). This is
            useful to easily overlay another mask on top of the causal one, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the causal mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top of the causal one, for example for image tokens handling.
    """
    # If we have an hybrid cache structure, here we want to create the mask for the full layers
    if hasattr(past_key_values, "is_sliding") and False in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(False)
    else:
        layer_idx = 0

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, input_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    # Early exit means the mask is either already a prepared 4D mask, or the attention
    # implementation handles masking itself — return it unchanged
    if early_exit:
        return attention_mask

    batch_size, dtype = input_embeds.shape[0], input_embeds.dtype
    mask_factory_function = causal_mask_function
    # The per-implementation mask constructor (eager / sdpa / flex / flash, ...)
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    # Defaulting to using non-vmap based mask creations except when detecting
    # users passing custom mask functions (as we cannot guarantee that they
    # are properly index-based as required by our implementation).
    use_vmap = False

    # Do not allow skip if we are compiling (this is to match BC)
    # TODO: cyril -> probably revisit and remove this, but a lot of tests rely on it
    if _is_torch_xpu_available:
        # Do not allow skip if we are compiling for decoding, but for prefill, we still allow skip to optimization the perf of 1st token generation
        allow_is_causal_skip = not (getattr(past_key_values, "is_compileable", False) and cache_position.shape[0] == 1)
    else:
        allow_is_causal_skip = not getattr(past_key_values, "is_compileable", False)

    # Allow slight deviations from causal mask
    # Note that it is very important to apply this before any other deviations of the mask (such as packed sequence mask,
    # padding mask, etc) as the resulting mask may otherwise not be correct!
    if or_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = or_masks(mask_factory_function, or_mask_function)
        allow_is_causal_skip = False
        use_vmap = True
    if and_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = and_masks(mask_factory_function, and_mask_function)
        allow_is_causal_skip = False
        use_vmap = True

    # If we detected packing format (diagonal-block mask so sequences packed in the same
    # row cannot attend to each other)
    if packed_sequence_mask is not None:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # We now create the mask
    causal_mask = mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
    return causal_mask
def create_bidirectional_mask(
    config: PreTrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    encoder_hidden_states: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a standard bidirectional mask based on the attention implementation used (stored in the config).

    Args:
        config (`PreTrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is only used to infer metadata
            such as the batch size, query length, dtype, and device.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, kv_length).
            It can also be an already prepared 4D mask of shape (batch_size, 1, query_length, kv_length),
            in which case it is returned as-is.
        encoder_hidden_states (`torch.Tensor`, optional):
            The input embeddings of shape (batch_size, kv_length, hidden_dim). If provided, it is used instead of
            `input_embeds` to infer the batch size, kv length and dtype.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the base mask function (by doing the union of both). This is
            useful to easily overlay another mask on top, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the base mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top, for example for image tokens handling.
    """
    # Due to the logic surrounding `cache_position` in inferring query-related information, we
    # construct a dummy tensor imitating initial positions
    cache_position = torch.arange(input_embeds.shape[1], device=input_embeds.device, dtype=torch.long)
    # Cross-attention case: key/value metadata come from the encoder states instead of the input
    embeds = encoder_hidden_states if encoder_hidden_states is not None else input_embeds
    # We ignore a few irrelevant arguments at the end as we do not have a (growing) cache here
    early_exit, attention_mask, _, kv_length, kv_offset = _preprocess_mask_arguments(
        config, embeds, attention_mask, cache_position, None, None, 0
    )
    # Early exit means the mask is either already a prepared 4D mask, or the attention
    # implementation handles masking itself — return it unchanged
    if early_exit:
        return attention_mask

    batch_size, dtype = embeds.shape[0], embeds.dtype
    mask_factory_function = bidirectional_mask_function
    # The per-implementation mask constructor (eager / sdpa / flex / flash, ...)
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    # Allow skipping the mask creation except we have additional masking operators (and/or masks)
    allow_is_bidirectional_skip = True

    # Defaulting to using non-vmap based mask creations except when detecting
    # users passing custom mask functions (as we cannot guarantee that they
    # are properly index-based as required by our implementation).
    use_vmap = False

    # Allow slight deviations from the base mask
    # Note that it is very important to apply this before any other deviations of the mask (such as packed sequence mask,
    # padding mask, etc) as the resulting mask may otherwise not be correct!
    if or_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = or_masks(mask_factory_function, or_mask_function)
        allow_is_bidirectional_skip = False
        use_vmap = True
    if and_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = and_masks(mask_factory_function, and_mask_function)
        allow_is_bidirectional_skip = False
        use_vmap = True

    # We now create the mask
    attention_mask = mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        # Additional kwargs for sdpa (never skip via the *causal* fast path here — the mask is bidirectional)
        allow_is_causal_skip=False,
        allow_is_bidirectional_skip=allow_is_bidirectional_skip,
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
    return attention_mask
def create_sliding_window_causal_mask(
    config: PreTrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a sliding window causal mask based on the attention implementation used (stored in the config). This type
    of attention pattern was mostly democratized by Mistral. If `past_key_values` has an hybrid cache structure, this
    function will return the mask corresponding to one of the "sliding_attention" layers (to align to what is needed in the
    `modeling_xxx.py` files).

    Args:
        config (`PreTrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim), used only to infer the batch
            size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the sliding causal one by union, e.g. to overlay an extra
            mask for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the sliding causal one by intersection, e.g. to overlay an
            extra mask for image tokens handling.
    """
    # For hybrid cache structures, build the mask for one of the sliding layers
    sliding_flags = getattr(past_key_values, "is_sliding", [])
    layer_idx = sliding_flags.index(True) if True in sliding_flags else 0

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, input_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    # Either an already-prepared 4D mask, or the implementation needs no explicit mask
    if early_exit:
        return attention_mask

    sliding_window = getattr(config, "sliding_window", None)
    if sliding_window is None:
        raise ValueError("Could not find a `sliding_window` argument in the config, or it is not set")

    batch_size, dtype = input_embeds.shape[0], input_embeds.dtype
    mask_fn = sliding_window_causal_mask_function(sliding_window)
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    # Default to non-vmap mask creation; custom user mask functions force vmap since we cannot
    # guarantee they are properly index-based as our non-vmap expansion requires.
    use_vmap = False
    # Do not allow skip if we are compiling (this is to match BC)
    # TODO: cyril -> probably revisit and remove this, but a lot of tests rely on it
    allow_is_causal_skip = not getattr(past_key_values, "is_compileable", False)

    # Combine optional user deviations with the base mask. This must happen before any other mask
    # deviation (packed sequence mask, padding mask, ...) or the result may be incorrect!
    # Union (`or`) is applied first, then intersection (`and`), matching the documented contract.
    for combiner, extra_fn in ((or_masks, or_mask_function), (and_masks, and_mask_function)):
        if extra_fn is not None:
            if not _is_torch_greater_or_equal_than_2_6:
                raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
            mask_fn = combiner(mask_fn, extra_fn)
            allow_is_causal_skip = False
            use_vmap = True

    # Packing format detected: restrict attention to each packed sub-sequence
    if packed_sequence_mask is not None:
        mask_fn = and_masks(mask_fn, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Dispatch to the implementation-specific mask constructor
    return mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_fn,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        local_size=sliding_window,  # Additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
def create_chunked_causal_mask(
    config: PreTrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a chunked attention causal mask based on the attention implementation used (stored in the config). This type
    of attention pattern was mostly democratized by Llama4. If `past_key_values` has an hybrid cache structure, this
    function will return the mask corresponding to one of the "chunked_attention" layers (to align to what is needed in the
    `modeling_xxx.py` files).

    Args:
        config (`PreTrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the chunked causal mask function (by doing the union of both). This is
            useful to easily overlay another mask on top of the chunked causal one, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the chunked causal mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top of the chunked causal one, for example for image tokens handling.
    """
    # If we have an hybrid cache structure, here we want to create the mask for the sliding layers
    if hasattr(past_key_values, "is_sliding") and True in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(True)
    else:
        layer_idx = 0

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, input_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    # Early exit means the mask is either already a prepared 4D mask, or the attention
    # implementation handles masking itself — return it unchanged
    if early_exit:
        return attention_mask

    chunk_size = getattr(config, "attention_chunk_size", None)
    if chunk_size is None:
        raise ValueError("Could not find an `attention_chunk_size` argument in the config, or it is not set")

    # Raise if using chunked attention on context too large with FA2
    if config._attn_implementation == "flash_attention_2" and kv_length + kv_offset > chunk_size:
        raise ValueError(
            "Flash attention 2 cannot handle chunked attention, and the key-value length is larger than the chunk size so the "
            "chunked pattern cannot be respected. You should use another `attn_implementation` when instantiating the model"
        )

    batch_size, dtype = input_embeds.shape[0], input_embeds.dtype

    # For chunked attention and batched inputs, we need to take the number of left padding tokens into account
    # to start the chunk from the actual start of the sequence for the padded sequence
    if attention_mask is not None:
        # Only count the left padding tokens, not all of them: a position is left padding iff the
        # cumulative sum of the mask is still zero there
        left_padding_tokens = (attention_mask.cumsum(dim=-1) == torch.zeros_like(attention_mask)).sum(dim=-1)
    else:
        left_padding_tokens = torch.zeros(batch_size, device=cache_position.device, dtype=int)
    mask_factory_function = chunked_causal_mask_function(chunk_size, left_padding_tokens)
    # The per-implementation mask constructor (eager / sdpa / flex / flash, ...)
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    # Defaulting to using non-vmap based mask creations except when detecting
    # users passing custom mask functions (as we cannot guarantee that they
    # are properly index-based as required by our implementation).
    use_vmap = False

    # Do not allow skip if we are compiling (this is to match BC)
    # TODO: cyril -> probably revisit and remove this, but a lot of tests rely on it
    allow_is_causal_skip = not getattr(past_key_values, "is_compileable", False)

    # Allow slight deviations from causal mask
    # Note that it is very important to apply this before any other deviations of the mask (such as packed sequence mask,
    # padding mask, etc) as the resulting mask may otherwise not be correct!
    if or_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = or_masks(mask_factory_function, or_mask_function)
        allow_is_causal_skip = False
        use_vmap = True
    if and_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = and_masks(mask_factory_function, and_mask_function)
        allow_is_causal_skip = False
        use_vmap = True

    # If we detected packing format (diagonal-block mask so sequences packed in the same
    # row cannot attend to each other)
    if packed_sequence_mask is not None:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # We now create the mask
    causal_mask = mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        local_size=chunk_size,  # Additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
    return causal_mask
# Maps each entry of `config.layer_types` to the mask-creation function handling that attention pattern
LAYER_PATTERN_TO_MASK_FUNCTION_MAPPING = {
    "full_attention": create_causal_mask,
    "sliding_attention": create_sliding_window_causal_mask,
    "chunked_attention": create_chunked_causal_mask,
}
def create_masks_for_generate(
    config: PreTrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
    **kwargs,
):
    """
    Mimic how the masks are created in the `modeling_xxx.py` files, so that places like `generate` can
    easily prepare them in advance (e.g. when compiling the forwards with Static caches).

    Args:
        config (`PreTrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim), used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the base one by union, e.g. to overlay an extra mask
            for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the base one by intersection, e.g. to overlay an extra
            mask for image tokens handling.

    Returns:
        A single mask, or a dict mapping each layer pattern to its mask when the config declares `layer_types`.
    """
    # For composite models, the relevant attributes live on the text config
    text_config = config.get_text_config()

    mask_kwargs = {
        "config": text_config,
        "input_embeds": input_embeds,
        "attention_mask": attention_mask,
        "cache_position": cache_position,
        "past_key_values": past_key_values,
        "position_ids": position_ids,
        "or_mask_function": or_mask_function,
        "and_mask_function": and_mask_function,
    }

    # Heterogeneous layer patterns: build one mask per declared pattern
    if hasattr(text_config, "layer_types"):
        return {
            pattern: LAYER_PATTERN_TO_MASK_FUNCTION_MAPPING[pattern](**mask_kwargs)
            for pattern in set(text_config.layer_types)
        }
    # Homogeneous models: all layers sliding, all chunked, or all standard causal
    if getattr(text_config, "sliding_window", None) is not None:
        return create_sliding_window_causal_mask(**mask_kwargs)
    if getattr(text_config, "attention_chunk_size", None) is not None:
        return create_chunked_causal_mask(**mask_kwargs)
    return create_causal_mask(**mask_kwargs)
# Below are utilities to pretty-print the different masks
# Print the matrix with words as row labels
GREEN = "\033[92m"  # ANSI escape: green foreground
YELLOW = "\033[93m"  # ANSI escape: yellow foreground
RESET = "\033[0m"  # ANSI escape: reset all attributes
BLACK_SQUARE = "■"  # fully-attended cell
WHITE_SQUARE = "⬚"  # fully-masked cell
GREY_SQUARE = "∙"  # NOTE(review): defined but not used in the visible rendering code
LOW_TRIANGLE = "⬕"  # partial cell, transition from attended to masked
UPPER_TRIANGLE = "⬔"  # partial cell, transition from masked to attended
def get_style(style):
    """Return the glyph set used to render attention-mask visualizations.

    Args:
        style: `"majong"` selects mahjong-tile glyphs; any other value selects
            plain block-drawing characters.

    Returns:
        A 4-tuple of single-character strings:
        `(BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE)`.
    """
    if style == "majong":
        # Fixed: the original assigned BLACK_SQUARE twice here ("🀞" then "🀙");
        # the first assignment was dead code and has been removed.
        BLACK_SQUARE = "🀙"  # Full block (represents "on" or active)
        WHITE_SQUARE = "🀆"  # Light shade (represents "off" or inactive)
        # Both triangles intentionally map to the same tile: no directional mahjong glyph exists
        LOW_TRIANGLE = "🀛"  # Lower left triangle (stylized indication)
        UPPER_TRIANGLE = "🀛"  # Upper left triangle (stylized indication)
    else:
        BLACK_SQUARE = "█"  # Full block (represents "on" or active)
        WHITE_SQUARE = "░"  # Light shade (represents "off" or inactive)
        LOW_TRIANGLE = "▙"  # Lower left triangle (stylized indication)
        UPPER_TRIANGLE = "▜"  # Upper left triangle (stylized indication)
    return BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE
# LOW_TRIANGLE = UPPER_TRIANGLE = "⟍" # Upper right triangle (stylized indication)
YELLOW_SQUARE = f"{YELLOW}{BLACK_SQUARE}{RESET}"  # uses the module-level "■", not a style-specific glyph
GREEN_SQUARE = f"{GREEN}{BLACK_SQUARE}{RESET}"  # uses the module-level "■", not a style-specific glyph
def tensor_to_mask_visual(original_tensor: torch.Tensor, grid_size=(20, 40), style="majong") -> str:
    """Render a 2D mask tensor as a small unicode picture.

    Cells equal to 1 render as a full glyph, 0 as an empty glyph; fractional values (which only
    appear after average-pooling a large tensor down to `grid_size`) render as a triangle glyph
    hinting at the direction of the attended/masked transition.

    Args:
        original_tensor: 2D tensor of mask values (1 = attended, 0 = masked).
        grid_size: `(max_height, max_width)` of the rendered grid; larger tensors are
            average-pooled down while roughly preserving aspect ratio.
        style: glyph style forwarded to `get_style` ("majong" or block-drawing characters).

    Returns:
        A multi-line string with one text row per (possibly pooled) tensor row.
    """
    BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE = get_style(style)
    h, w = original_tensor.shape
    max_h, max_w = grid_size
    if not (h < max_h and w < max_w):
        # Preserve aspect ratio within the max grid size; the factor 2 compensates for terminal
        # character cells being roughly twice as tall as they are wide
        aspect_ratio = 2 * w / h
        if aspect_ratio > 1:
            w = max_w
            h = min(max_h, max(1, round(max_w / aspect_ratio)))
        else:
            h = max_h
            w = max(1, round(max_h * aspect_ratio))
        # Rescale the tensor by average pooling; resulting fractional cells mark transitions
        tensor = original_tensor.unsqueeze(0).unsqueeze(0)  # Add batch and channel dimensions
        tensor = F.adaptive_avg_pool2d(tensor, output_size=(h, w))[0, 0]  # Remove extra dims
    else:
        tensor = original_tensor

    # Build the string representation row by row
    result = []
    for i in range(h):
        row = ""
        for j in range(w):
            if tensor[i, j] == 1:
                row += BLACK_SQUARE
            elif tensor[i, j] == 0:
                row += WHITE_SQUARE
            else:
                # Fractional cell: choose a triangle based on a neighboring cell to hint at the
                # direction of the transition
                if j > 0:
                    if tensor[i, j - 1] == 1:
                        row += LOW_TRIANGLE
                    elif tensor[i, j - 1] == 0:
                        row += UPPER_TRIANGLE
                    else:
                        # Both neighbors fractional: fall back to rounding (tensor[i, j] is known
                        # to be fractional here, so the original `== 1` check always chose white;
                        # behavior kept identical)
                        row += BLACK_SQUARE if tensor[i, j] == 1 else WHITE_SQUARE
                elif j + 1 < w:
                    # First column: look ahead instead (the value is known fractional here, so the
                    # redundant ==1/==0 re-checks of the original were dead and have been removed)
                    row += UPPER_TRIANGLE if tensor[i, j + 1] == 1 else LOW_TRIANGLE
                else:
                    # Fix: single-column grid has no neighbor to inspect — the original code
                    # raised IndexError on `tensor[i, j + 1]` here; round the value instead
                    row += BLACK_SQUARE if tensor[i, j] >= 0.5 else WHITE_SQUARE
        result.append(row)
    return "\n".join(result)
| AttentionMaskInterface |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/final3.py | {
"start": 4223,
"end": 4393
} | class ____:
def __init__(self):
self.x: Final = 1
def method1(self):
# This should generate an error because x is Final.
self.x += 1
| ClassD |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/ecs.py | {
"start": 11622,
"end": 13602
} | class ____(EcsBaseOperator):
"""
Register a task definition on AWS ECS.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EcsRegisterTaskDefinitionOperator`
:param family: The family name of a task definition to create.
:param container_definitions: A list of container definitions in JSON format that describe
the different containers that make up your task.
:param register_task_kwargs: Extra arguments for Register Task Definition.
"""
template_fields: Sequence[str] = (
"family",
"container_definitions",
"register_task_kwargs",
)
def __init__(
self,
*,
family: str,
container_definitions: list[dict],
register_task_kwargs: dict | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.family = family
self.container_definitions = container_definitions
self.register_task_kwargs = register_task_kwargs or {}
def execute(self, context: Context):
self.log.info(
"Registering task definition %s using the following values: %s",
self.family,
self.register_task_kwargs,
)
self.log.info("Using container definition %s", self.container_definitions)
response = self.client.register_task_definition(
family=self.family,
containerDefinitions=self.container_definitions,
**self.register_task_kwargs,
)
task_definition_details = response["taskDefinition"]
task_definition_arn = task_definition_details["taskDefinitionArn"]
self.log.info(
"Task Definition %r in state: %r.", task_definition_arn, task_definition_details.get("status")
)
context["ti"].xcom_push(key="task_definition_arn", value=task_definition_arn)
return task_definition_arn
| EcsRegisterTaskDefinitionOperator |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 34527,
"end": 38744
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[8]", L_y_: "f32[8]"):
l_x_ = L_x_
l_y_ = L_y_
subgraph_0 = self.subgraph_0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = None
a: "f32[8]" = invoke_subgraph[0]; invoke_subgraph = None
subgraph_1 = self.subgraph_1
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_1', a, l_y_); subgraph_1 = a = l_y_ = None
getitem_1: "f32[8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
return (getitem_1,)
class subgraph_0(torch.nn.Module):
def forward(self, l_x_: "f32[8]", l_y_: "f32[8]"):
mul: "f32[8]" = torch.mul(l_x_, l_y_); l_x_ = l_y_ = None
mul_1: "f32[8]" = mul * 2; mul = None
return (mul_1,)
class subgraph_1(torch.nn.Module):
def forward(self, a: "f32[8]", l_y_: "f32[8]"):
mul: "f32[8]" = torch.mul(a, l_y_); a = l_y_ = None
mul_1: "f32[8]" = mul * 3; mul = None
return (mul_1,)
""",
)
@unittest.expectedFailure
def test_nonlocal_list_mutation_hidden(self):
"""Test that nonlocal list mutation inside nested_compile_region is handled correctly."""
@nested_compile_region
def gn(x, z):
o = torch.matmul(x, x) @ x
out = x.sin()
z.append(out)
return torch.cos(torch.sin(o)), torch.sin(x)
def fn(x):
z = []
outs = gn(x, z)
out1 = outs[0]
# Check that the extra output pytree handling is done properly
out2 = outs[-1]
return out1 + out2, z[0]
x = torch.randn(4, 4, requires_grad=True)
ref = fn(x)
opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref[0], res[0])
self.assertEqual(ref[1], res[1])
@inductor_config.patch("fx_graph_cache", False)
def test_view_to_reshape(self):
@nested_compile_region
def gn(x):
x = torch.sin(x)
x = x.view(1, 8)
return torch.sin(x)
def fn(x):
return gn(x)
x = torch.randn(8, requires_grad=False)
torch._dynamo.reset()
backend = InductorAndRecordGraphs()
torch.compile(fn, backend=backend, fullgraph=True)(x)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(
backend.inductor_graphs[0].print_readable(print_output=False)
),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[8]"):
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1); repeated_subgraph0 = arg0_1 = None
getitem: "f32[1, 8]" = invoke_subgraph[0]; invoke_subgraph = None
return (getitem,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[8]"):
sin: "f32[8]" = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
view: "f32[1, 8]" = torch.ops.aten.reshape.default(sin, [1, 8]); sin = None
sin_1: "f32[1, 8]" = torch.ops.aten.sin.default(view); view = None
return (sin_1,)
""",
)
def test_normalize_gm(self):
@nested_compile_region
def gn(x, y):
# Different graph give different names to intermediate nodes
for _ in range(5):
x = x * y
return x
def fn(x, y):
for _ in range(5):
x = gn(x, y)
return x
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(8, requires_grad=True)
y = torch.randn(8, requires_grad=True)
opt_fn(x, y)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | numpy__numpy | numpy/random/tests/test_smoke.py | {
"start": 27772,
"end": 28066
} | class ____(RNG):
@classmethod
def _create_rng(cls):
bit_generator = SFC64
advance = None
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 192
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
| TestSFC64 |
python | huggingface__transformers | tests/models/fuyu/test_modeling_fuyu.py | {
"start": 5616,
"end": 10146
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
FuyuModel,
FuyuForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"text-generation": FuyuForCausalLM, "image-text-to-text": FuyuForCausalLM} if is_torch_available() else {}
)
test_cpu_offload = False
test_disk_offload = False
def setUp(self):
self.model_tester = FuyuModelTester(self)
def test_mismatching_image_patches(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict) # in=place modifications further
# two image token and two image
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
input_ids = curr_input_dict["input_ids"]
image_patches = curr_input_dict["image_patches"][1:, ...]
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, image_patches=image_patches)
# remove one image token from text
input_ids = curr_input_dict["input_ids"][2:]
image_patches = curr_input_dict["image_patches"]
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, image_patches=image_patches)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip("Fuyu doesn't support assisted generation due to the need to crop/extend image patches indices")
def test_assisted_decoding_matches_greedy_search(self):
pass
@pytest.mark.generate
@unittest.skip("Fuyu doesn't support assisted generation due to the need to crop/extend image patches indices")
def test_assisted_decoding_sample(self):
pass
# TODO: Fix me (once this model gets more usage)
@unittest.skip(reason="Does not work on the tiny model.")
def test_disk_offload_bin(self):
super().test_disk_offload()
# TODO: Fix me (once this model gets more usage)
@unittest.skip(reason="Does not work on the tiny model.")
def test_disk_offload_safetensors(self):
super().test_disk_offload()
# TODO: Fix me (once this model gets more usage)
@unittest.skip(reason="Does not work on the tiny model.")
def test_model_parallelism(self):
super().test_model_parallelism()
@unittest.skip(reason="Fuyu `prepare_inputs_for_generation` function doesn't have cache position.")
def test_generate_continue_from_inputs_embeds(self):
pass
@unittest.skip("Persimmon backbone applies key/query norm which doesn't work with packing")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Persimmon backbone applies key/query norm which doesn't work with packing")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
pass
@unittest.skip("Persimmon backbone applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Persimmon backbone applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(reason="Fuyu has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
@slow
@require_torch_accelerator
| FuyuModelTest |
python | tensorflow__tensorflow | tensorflow/python/ops/op_selector.py | {
"start": 10414,
"end": 14168
} | class ____(Exception):
"""Raised if a Tensor cannot be lifted from the graph."""
# Prevent autograph from rewriting this error.
ag_pass_through = True
def _as_operation(op_or_tensor):
if isinstance(op_or_tensor, tensor_lib.Tensor):
return op_or_tensor.op
return op_or_tensor
def graph_inputs(op):
return [x.op for x in op.inputs] + list(op.control_inputs)
def show_path(from_op, tensors, sources):
"""Find one path from `from_op` to any of `tensors`, ignoring `sources`.
Args:
from_op: A `tf.Operation`.
tensors: A `tf.Operation`, a `tf.Tensor`, or a list thereof.
sources: A list of `tf.Tensor`.
Returns:
A python string containing the path, or "??" if none is found.
"""
if isinstance(from_op, tensor_lib.Tensor):
from_op = from_op.op
if not isinstance(tensors, list):
tensors = [tensors]
final_ops = [_as_operation(tensor) for tensor in tensors]
visited_ops = set(x.op for x in sources)
ops_to_visit = list(final_ops)
some_op_output = {}
while ops_to_visit:
op = ops_to_visit.pop()
if op in visited_ops:
continue
visited_ops.add(op)
if op == from_op:
path_op = op
path = [path_op]
while path_op not in final_ops:
path_op = some_op_output[path_op]
path.append(path_op)
return " <- ".join("%s (%s)" % (x.name, x.type) for x in reversed(path))
else:
for inp in graph_inputs(op):
if inp not in visited_ops and inp not in sources:
some_op_output[inp] = op
ops_to_visit.append(inp)
return "??"
# TODO(jmenick) - there is considerable duplication of functionality between
# this function and get_backward_walk_ops(). Need to deduplicate.
def map_subgraph(init_tensor, sources, disallowed_placeholders, visited_ops,
op_outputs, add_sources):
"""Walk a Graph and capture the subgraph between init_tensor and sources.
Note: This function mutates visited_ops and op_outputs.
Args:
init_tensor: A Tensor or Operation where the subgraph terminates.
sources: A set of Tensors where subgraph extraction should stop.
disallowed_placeholders: An optional set of ops which may not appear in the
lifted graph. Defaults to all placeholders.
visited_ops: A set of operations which were visited in a prior pass.
op_outputs: A defaultdict containing the outputs of an op which are to be
copied into the new subgraph.
add_sources: A boolean indicating whether placeholders which are not in
sources should be allowed.
Returns:
The set of placeholders upon which init_tensor depends and are not in
sources.
Raises:
UnliftableError: if init_tensor depends on a placeholder which is not in
sources and add_sources is False.
"""
ops_to_visit = [_as_operation(init_tensor)]
extra_sources = object_identity.ObjectIdentitySet()
while ops_to_visit:
op = ops_to_visit.pop()
if op in visited_ops:
continue
visited_ops.add(op)
should_raise = False
if disallowed_placeholders is not None and op in disallowed_placeholders:
should_raise = True
elif op.type == "Placeholder":
if disallowed_placeholders is None and not add_sources:
should_raise = True
extra_sources.update(op.outputs)
if should_raise:
raise UnliftableError(
"Unable to lift tensor %s because it depends transitively on "
"placeholder %s via at least one path, e.g.: %s" %
(repr(init_tensor), repr(op), show_path(op, init_tensor, sources)))
for inp in graph_inputs(op):
op_outputs[inp].add(op)
if inp not in visited_ops and inp not in (sources or extra_sources):
ops_to_visit.append(inp)
return extra_sources
| UnliftableError |
python | ApeWorX__ape | src/ape/managers/converters.py | {
"start": 7163,
"end": 7796
} | class ____(ConverterAPI):
"""
Convert string-formatted floating point values to `Decimal` type.
"""
def is_convertible(self, value: Any) -> bool:
# Matches only string-formatted floats with an optional sign character (+/-).
# Leading and trailing zeros are required.
# NOTE: `re.fullmatch` will only match the full string, so "1.0 ether" and "10.0 USDC"
# will not be identified as convertible.
return isinstance(value, str) and re.fullmatch(r"[+-]?\d+\.\d+", value) is not None
def convert(self, value: str) -> Decimal:
return Decimal(value)
| StringDecimalConverter |
python | walkccc__LeetCode | solutions/320. Generalized Abbreviation/320.py | {
"start": 0,
"end": 619
} | class ____:
def generateAbbreviations(self, word: str) -> list[str]:
ans = []
def getCountString(count: int) -> str:
return str(count) if count > 0 else ''
def dfs(i: int, count: int, path: list[str]) -> None:
if i == len(word):
ans.append(''.join(path) + getCountString(count))
return
# Abbreviate the word[i].
dfs(i + 1, count + 1, path)
# Keep the word[i], so consume the count as a string.
path.append(getCountString(count) + word[i])
# Reset the count to 0.
dfs(i + 1, 0, path)
path.pop()
dfs(0, 0, [])
return ans
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 211522,
"end": 212182
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("BranchProtectionRuleEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("BranchProtectionRule"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| BranchProtectionRuleConnection |
python | ansible__ansible | test/integration/targets/shell-plugins/action_plugins/test_shell.py | {
"start": 247,
"end": 521
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
result['shell'] = self._connection._shell.SHELL_FAMILY
return result
| ActionModule |
python | Textualize__textual | docs/examples/how-to/center07.py | {
"start": 181,
"end": 635
} | class ____(App):
"""How to center things."""
CSS = """
Screen {
align: center middle;
}
#hello {
background: blue 50%;
border: wide white;
width: 40;
height: 9;
text-align: center;
content-align: center middle;
}
"""
def compose(self) -> ComposeResult:
yield Static(QUOTE, id="hello")
if __name__ == "__main__":
app = CenterApp()
app.run()
| CenterApp |
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 1167,
"end": 1310
} | class ____(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice_string_w_none"]
| EmptyCharLabelNoneChoiceForm |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 5456,
"end": 5774
} | class ____(BaseEstimator):
# Note that object is an uninitialized class, thus immutable.
def __init__(self, p=42, q=np.int32(42), r=object):
self.p = p
self.q = q
self.r = r
def fit(self, X, y=None):
X, y = validate_data(self, X, y)
return self
| HasImmutableParameters |
python | django-haystack__django-haystack | test_haystack/test_views.py | {
"start": 569,
"end": 691
} | class ____(SearchForm):
q = forms.CharField(initial="Search for...", required=False, label="Search")
| InitialedSearchForm |
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_model.py | {
"start": 6436,
"end": 6636
} | class ____(ToFromDirectTestBase, ToFromModelTestMixin):
"""Directly test ``to/from_model``."""
def setup_class(self):
self.functions = {"to": to_model, "from": from_model}
| TestToFromModel |
python | pytorch__pytorch | torch/_subclasses/meta_utils.py | {
"start": 19844,
"end": 20424
} | class ____(ViewFunc["FakeTensor"]):
@override
def apply(
self,
t: torch.Tensor,
new_base: torch.Tensor,
symint_visitor_fn: Optional[Callable[[int], int]] = None,
tensor_visitor_fn: Optional[Callable[[torch.Tensor], FakeTensor]] = None,
) -> FakeTensor:
return torch._subclasses.fake_tensor.FakeTensor._view_func_unsafe(
# pyrefly: ignore [bad-argument-type]
t,
new_base,
symint_visitor_fn,
tensor_visitor_fn,
)
@dataclass(frozen=True)
| _FakeTensorViewFunc |
python | pypa__warehouse | warehouse/email/interfaces.py | {
"start": 78,
"end": 504
} | class ____(Interface):
def create_service(context, request):
"""
Create the service, given the context and request for which it is being
created for.
"""
def send(recipient, message):
"""
Sends an EmailMessage to the given recipient.
"""
def last_sent(to, subject):
"""
Determines when an email was last sent, if at all
"""
| IEmailSender |
python | openai__openai-python | tests/test_response.py | {
"start": 1671,
"end": 3650
} | class ____(pydantic.BaseModel): ...
def test_response_parse_mismatched_basemodel(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
with pytest.raises(
TypeError,
match="Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`",
):
response.parse(to=PydanticModel)
@pytest.mark.asyncio
async def test_async_response_parse_mismatched_basemodel(async_client: AsyncOpenAI) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
with pytest.raises(
TypeError,
match="Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`",
):
await response.parse(to=PydanticModel)
def test_response_parse_custom_stream(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
stream=True,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
stream = response.parse(to=Stream[int])
assert stream._cast_to == int
@pytest.mark.asyncio
async def test_async_response_parse_custom_stream(async_client: AsyncOpenAI) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
stream=True,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
stream = await response.parse(to=Stream[int])
assert stream._cast_to == int
| PydanticModel |
python | apache__airflow | providers/fab/src/airflow/providers/fab/www/views.py | {
"start": 2375,
"end": 5182
} | class ____(IndexView):
"""
A simple view that inherits from FAB index view.
The only goal of this view is to redirect the user to the Airflow 3 UI index page if the user is
authenticated. It is impossible to redirect the user directly to the Airflow 3 UI index page before
redirecting them to this page because FAB itself defines the logic redirection and does not allow external
redirect.
"""
@expose("/")
def index(self):
return redirect(conf.get("api", "base_url", fallback="/"), code=302)
def show_traceback(error):
"""Show Traceback for a given error."""
return render_template("airflow/traceback.html"), 500
def not_found(error):
"""Show Not Found on screen for any error in the Webserver."""
return (
render_template(
"airflow/error.html",
hostname="",
status_code=404,
error_message="Page cannot be found.",
),
404,
)
def get_safe_url(url):
"""Given a user-supplied URL, ensure it points to our web server."""
if not url:
return url_for("FabIndexView.index")
# If the url contains semicolon, redirect it to homepage to avoid
# potential XSS. (Similar to https://github.com/python/cpython/pull/24297/files (bpo-42967))
if ";" in unquote(url):
return url_for("FabIndexView.index")
url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE)
host_url = urlsplit(request.host_url)
redirect_url = urlsplit(urljoin(request.host_url, url))
if not (redirect_url.scheme in ("http", "https") and host_url.netloc == redirect_url.netloc):
return url_for("FabIndexView.index")
# This will ensure we only redirect to the right scheme/netloc
return redirect_url.geturl()
def redirect(*args, **kwargs):
if g.user is not None and g.user.is_authenticated:
token = get_auth_manager().generate_jwt(g.user)
response = make_response(flask_redirect(*args, **kwargs))
secure = request.scheme == "https" or bool(conf.get("api", "ssl_cert", fallback=""))
# In Airflow 3.1.1 authentication changes, front-end no longer handle the token
# See https://github.com/apache/airflow/pull/55506
if AIRFLOW_V_3_1_1_PLUS:
response.set_cookie(COOKIE_NAME_JWT_TOKEN, token, secure=secure, httponly=True)
else:
response.set_cookie(COOKIE_NAME_JWT_TOKEN, token, secure=secure)
return response
return flask_redirect(*args, **kwargs)
def method_not_allowed(error):
"""Show Method Not Allowed on screen for any error in the Webserver."""
return (
render_template(
"airflow/error.html",
status_code=405,
error_message="Received an invalid request.",
),
405,
)
| FabIndexView |
python | pytorch__pytorch | test/quantization/core/experimental/test_fake_quantize.py | {
"start": 524,
"end": 3793
} | class ____(unittest.TestCase):
r""" Tests fake quantize calculate_qparams() method
by comparing with result from observer calculate_qparams.
Uses hard-coded values: alpha=1.0, b=4, k=2.
"""
def test_fake_calc_qparams(self):
apot_fake = APoTFakeQuantize(b=4, k=2)
apot_fake.activation_post_process.min_val = torch.tensor([0.0])
apot_fake.activation_post_process.max_val = torch.tensor([1.0])
alpha, gamma, quantization_levels, level_indices = apot_fake.calculate_qparams(signed=False)
observer = APoTObserver(b=4, k=2)
observer.min_val = torch.tensor([0.0])
observer.max_val = torch.tensor([1.0])
qparams_expected = observer.calculate_qparams(signed=False)
self.assertEqual(alpha, qparams_expected[0])
self.assertTrue(torch.equal(gamma, qparams_expected[1]))
self.assertTrue(torch.equal(quantization_levels, qparams_expected[2]))
self.assertTrue(torch.equal(level_indices, qparams_expected[3]))
r""" Tests fake quantize forward() method
by comparing result with expected
quant_dequant_APoT mapping of input tensor.
Uses input tensor with random values from 0 -> 1000
and APoT observer with hard-coded values b=4, k=2
"""
def test_forward(self):
# generate a tensor of size 20 with random values
# between 0 -> 1000 to quantize -> dequantize
X = 1000 * torch.rand(20)
observer = APoTObserver(b=4, k=2)
observer.forward(X)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
apot_fake = APoTFakeQuantize(b=4, k=2)
apot_fake.enable_observer()
apot_fake.enable_fake_quant()
X_reduced_precision_fp = apot_fake.forward(torch.clone(X), False)
# get X_expected by converting fp -> apot -> fp to simulate quantize -> dequantize
X_to_apot = quantize_APoT(X, alpha, gamma, quantization_levels, level_indices)
X_expected = dequantize_APoT(X_to_apot)
self.assertTrue(torch.equal(X_reduced_precision_fp, X_expected))
r""" Tests fake quantize forward() method
throws error when qparams are None
"""
def test_forward_exception(self):
# generate a tensor of size 20 with random values
# between 0 -> 1000 to quantize -> dequantize
X = 1000 * torch.rand(20)
apot_fake = APoTFakeQuantize(b=4, k=2)
# disable observer so qparams not set, qparams are all None
apot_fake.disable_observer()
apot_fake.enable_fake_quant()
with self.assertRaises(Exception):
apot_fake.forward(torch.clone(X), False)
r""" Tests fake quantize helper backward() method
using torch.autograd.gradcheck function.
"""
def test_backward(self):
input = torch.randn(20, dtype=torch.double, requires_grad=True)
observer = APoTObserver(b=4, k=2)
observer(input)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
gradcheck(fake_quantize_function.apply, (input, alpha, gamma, quantization_levels, level_indices), atol=1e-4)
if __name__ == '__main__':
unittest.main()
| TestFakeQuantize |
python | django__django | tests/queries/test_query.py | {
"start": 6750,
"end": 8847
} | class ____(TestCase):
def test_rawsql_annotation(self):
query = Query(None)
sql = "%s = 1"
# Wrap with a CASE WHEN expression if a database backend (e.g. Oracle)
# doesn't support boolean expression in SELECT list.
if not connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
query.add_annotation(RawSQL(sql, (1,), BooleanField()), "_check")
result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
self.assertEqual(result[0], 1)
def test_subquery_annotation(self):
query = Query(None)
query.add_annotation(Exists(Item.objects.all()), "_check")
result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
self.assertEqual(result[0], 0)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_q_annotation(self):
query = Query(None)
check = ExpressionWrapper(
Q(RawSQL("%s = 1", (1,), BooleanField())) | Q(Exists(Item.objects.all())),
BooleanField(),
)
query.add_annotation(check, "_check")
result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
self.assertEqual(result[0], 1)
def test_names_to_path_field(self):
query = Query(None)
query.add_annotation(Value(True), "value")
path, final_field, targets, names = query.names_to_path(["value"], opts=None)
self.assertEqual(path, [])
self.assertIsInstance(final_field, BooleanField)
self.assertEqual(len(targets), 1)
self.assertIsInstance(targets[0], BooleanField)
self.assertEqual(names, [])
def test_names_to_path_field_error(self):
query = Query(None)
msg = "Cannot resolve keyword 'nonexistent' into field."
with self.assertRaisesMessage(FieldError, msg):
query.names_to_path(["nonexistent"], opts=None)
def test_get_field_names_from_opts(self):
self.assertEqual(get_field_names_from_opts(None), set())
| TestQueryNoModel |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_constructors.py | {
"start": 313,
"end": 9289
} | class ____:
def test_array_of_dt64_nat_raises(self):
# GH#39462
nat = np.datetime64("NaT", "ns")
arr = np.array([nat], dtype=object)
msg = "Invalid type for timedelta scalar"
with pytest.raises(TypeError, match=msg):
TimedeltaIndex(arr)
with pytest.raises(TypeError, match=msg):
TimedeltaArray._from_sequence(arr, dtype="m8[ns]")
with pytest.raises(TypeError, match=msg):
to_timedelta(arr)
def test_int64_nocopy(self):
# GH#23539 check that a copy isn't made when we pass int64 data
# and copy=False
arr = np.arange(10, dtype=np.int64)
tdi = TimedeltaIndex(arr, copy=False)
assert tdi._data._ndarray.base is arr
def test_infer_from_tdi(self):
# GH#23539
# fast-path for inferring a frequency if the passed data already
# has one
tdi = timedelta_range("1 second", periods=10**7, freq="1s")
result = TimedeltaIndex(tdi, freq="infer")
assert result.freq == tdi.freq
# check that inferred_freq was not called by checking that the
# value has not been cached
assert "inferred_freq" not in getattr(result, "_cache", {})
def test_infer_from_tdi_mismatch(self):
# GH#23539
# fast-path for invalidating a frequency if the passed data already
# has one and it does not match the `freq` input
tdi = timedelta_range("1 second", periods=100, freq="1s")
msg = (
"Inferred frequency .* from passed values does "
"not conform to passed frequency"
)
with pytest.raises(ValueError, match=msg):
TimedeltaIndex(tdi, freq="D")
with pytest.raises(ValueError, match=msg):
TimedeltaIndex(tdi._data, freq="D")
def test_dt64_data_invalid(self):
# GH#23539
# passing tz-aware DatetimeIndex raises, naive or ndarray[datetime64]
# raise as of GH#29794
dti = pd.date_range("2016-01-01", periods=3)
msg = "cannot be converted to timedelta64"
with pytest.raises(TypeError, match=msg):
TimedeltaIndex(dti.tz_localize("Europe/Brussels"))
with pytest.raises(TypeError, match=msg):
TimedeltaIndex(dti)
with pytest.raises(TypeError, match=msg):
TimedeltaIndex(np.asarray(dti))
def test_float64_ns_rounded(self):
# GH#23539 without specifying a unit, floats are regarded as nanos,
# and fractional portions are truncated
tdi = TimedeltaIndex([2.3, 9.7])
expected = TimedeltaIndex([2, 9])
tm.assert_index_equal(tdi, expected)
# integral floats are non-lossy
tdi = TimedeltaIndex([2.0, 9.0])
expected = TimedeltaIndex([2, 9])
tm.assert_index_equal(tdi, expected)
# NaNs get converted to NaT
tdi = TimedeltaIndex([2.0, np.nan])
expected = TimedeltaIndex([Timedelta(nanoseconds=2), pd.NaT])
tm.assert_index_equal(tdi, expected)
def test_float64_unit_conversion(self):
# GH#23539
tdi = to_timedelta([1.5, 2.25], unit="D")
expected = TimedeltaIndex([Timedelta(days=1.5), Timedelta(days=2.25)])
tm.assert_index_equal(tdi, expected)
def test_construction_base_constructor(self):
arr = [Timedelta("1 days"), pd.NaT, Timedelta("3 days")]
tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr)))
arr = [np.nan, pd.NaT, Timedelta("1 days")]
tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr)))
def test_constructor(self):
expected = TimedeltaIndex(
[
"1 days",
"1 days 00:00:05",
"2 days",
"2 days 00:00:02",
"0 days 00:00:03",
]
)
result = TimedeltaIndex(
[
"1 days",
"1 days, 00:00:05",
np.timedelta64(2, "D"),
timedelta(days=2, seconds=2),
pd.offsets.Second(3),
]
)
tm.assert_index_equal(result, expected)
def test_constructor_iso(self):
# GH #21877
expected = timedelta_range("1s", periods=9, freq="s")
durations = [f"P0DT0H0M{i}S" for i in range(1, 10)]
result = to_timedelta(durations)
tm.assert_index_equal(result, expected)
def test_timedelta_range_fractional_period(self):
msg = "periods must be an integer"
with pytest.raises(TypeError, match=msg):
timedelta_range("1 days", periods=10.5)
def test_constructor_coverage(self):
msg = "periods must be an integer, got foo"
with pytest.raises(TypeError, match=msg):
timedelta_range(start="1 days", periods="foo", freq="D")
msg = (
r"TimedeltaIndex\(\.\.\.\) must be called with a collection of some kind, "
"'1 days' was passed"
)
with pytest.raises(TypeError, match=msg):
TimedeltaIndex("1 days")
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(["1 days", "2 days", "3 days"])
result = TimedeltaIndex(strings)
expected = to_timedelta([1, 2, 3], unit="D")
tm.assert_index_equal(result, expected)
from_ints = TimedeltaIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming freq
msg = (
"Inferred frequency None from passed values does not conform to "
"passed frequency D"
)
with pytest.raises(ValueError, match=msg):
TimedeltaIndex(["1 days", "2 days", "4 days"], freq="D")
msg = (
"Of the four parameters: start, end, periods, and freq, exactly "
"three must be specified"
)
with pytest.raises(ValueError, match=msg):
timedelta_range(periods=10, freq="D")
def test_constructor_name(self):
idx = timedelta_range(start="1 days", periods=1, freq="D", name="TEST")
assert idx.name == "TEST"
# GH10025
idx2 = TimedeltaIndex(idx, name="something else")
assert idx2.name == "something else"
def test_constructor_no_precision_raises(self):
# GH-24753, GH-24739
msg = "with no precision is not allowed"
with pytest.raises(ValueError, match=msg):
TimedeltaIndex(["2000"], dtype="timedelta64")
msg = "The 'timedelta64' dtype has no unit. Please pass in"
with pytest.raises(ValueError, match=msg):
pd.Index(["2000"], dtype="timedelta64")
def test_constructor_wrong_precision_raises(self):
msg = "Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'"
with pytest.raises(ValueError, match=msg):
TimedeltaIndex(["2000"], dtype="timedelta64[D]")
# "timedelta64[us]" was unsupported pre-2.0, but now this works.
tdi = TimedeltaIndex(["2000"], dtype="timedelta64[us]")
assert tdi.dtype == "m8[us]"
def test_explicit_none_freq(self):
# Explicitly passing freq=None is respected
tdi = timedelta_range(1, periods=5)
assert tdi.freq is not None
result = TimedeltaIndex(tdi, freq=None)
assert result.freq is None
result = TimedeltaIndex(tdi._data, freq=None)
assert result.freq is None
def test_from_categorical(self):
tdi = timedelta_range(1, periods=5)
cat = pd.Categorical(tdi)
result = TimedeltaIndex(cat)
tm.assert_index_equal(result, tdi)
ci = pd.CategoricalIndex(tdi)
result = TimedeltaIndex(ci)
tm.assert_index_equal(result, tdi)
@pytest.mark.parametrize(
"unit,unit_depr",
[
("W", "w"),
("D", "d"),
("min", "MIN"),
("s", "S"),
("h", "H"),
("ms", "MS"),
("us", "US"),
],
)
def test_unit_deprecated(self, unit, unit_depr):
# GH#52536, GH#59051
msg = f"'{unit_depr}' is deprecated and will be removed in a future version."
expected = TimedeltaIndex([f"1{unit}", f"2{unit}"])
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = TimedeltaIndex([f"1{unit_depr}", f"2{unit_depr}"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
tdi = to_timedelta([1, 2], unit=unit_depr)
tm.assert_index_equal(tdi, expected)
| TestTimedeltaIndex |
python | huggingface__transformers | src/transformers/models/voxtral/processing_voxtral.py | {
"start": 1354,
"end": 1448
} | class ____(AudioKwargs, total=False):
max_source_positions: Optional[int]
| VoxtralAudioKwargs |
python | jazzband__django-model-utils | tests/models.py | {
"start": 13397,
"end": 13442
} | class ____(UUIDModel):
pass
| CustomUUIDModel |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/jit/rpc_test.py | {
"start": 10509,
"end": 10669
} | class ____(torch.nn.Module):
def forward(self) -> Tensor:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
| MyModuleInterface |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis04.py | {
"start": 315,
"end": 1582
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [46891776, 46893312]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.set_x_axis({"name": "XXX"})
chart.set_y_axis({"name": "YYY"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F821_5.py | {
"start": 38,
"end": 115
} | class ____:
def random_func(self) -> "InnerClass":
pass
| RandomClass |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/specialization1.py | {
"start": 238,
"end": 833
} | class ____:
def __init__(self) -> None: ...
def m1(self, a: Moo[A]) -> None: ...
def m2(self, b: Moo[B]) -> None: ...
a = Moo[A]()
b = Moo[B]()
y = Foo()
y.m1(a)
# This should generate an error:
# Argument of type 'Moo[B]' cannot be assigned to parameter of type 'Moo[A]'
y.m1(b)
# This should generate an error:
# Argument of type 'Moo[A]' cannot be assigned to parameter of type 'Moo[B]'
y.m2(a)
y.m2(b)
def m3(c: Moo[C]):
pass
# This should generate an error:
# Type argument 'List[C]' cannot be assigned to type variable '_T1'
def m4(c: Moo[List[C]]):
pass
| Foo |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 51459,
"end": 52481
} | class ____(FieldValues):
"""
Valid and invalid values for `DateTimeField` when not using UTC as the timezone.
"""
@classmethod
def setup_class(cls):
# use class setup method, as class-level attribute will still be evaluated even if test is skipped
kolkata = ZoneInfo('Asia/Kolkata')
cls.valid_inputs = {
'2016-12-19T10:00:00': datetime.datetime(2016, 12, 19, 10, tzinfo=kolkata),
'2016-12-19T10:00:00+05:30': datetime.datetime(2016, 12, 19, 10, tzinfo=kolkata),
datetime.datetime(2016, 12, 19, 10): datetime.datetime(2016, 12, 19, 10, tzinfo=kolkata),
}
cls.invalid_inputs = {}
cls.outputs = {
datetime.datetime(2016, 12, 19, 10): '2016-12-19T10:00:00+05:30',
datetime.datetime(2016, 12, 19, 4, 30, tzinfo=utc): '2016-12-19T10:00:00+05:30',
}
cls.field = serializers.DateTimeField(default_timezone=kolkata)
@override_settings(TIME_ZONE='UTC', USE_TZ=True)
| TestTZWithDateTimeField |
python | faif__python-patterns | tests/behavioral/test_publish_subscribe.py | {
"start": 139,
"end": 2732
} | class ____(unittest.TestCase):
"""
Integration tests ~ provider class with as little mocking as possible.
"""
def test_subscriber_shall_be_attachable_to_subscriptions(cls):
subscription = "sub msg"
pro = Provider()
cls.assertEqual(len(pro.subscribers), 0)
sub = Subscriber("sub name", pro)
sub.subscribe(subscription)
cls.assertEqual(len(pro.subscribers[subscription]), 1)
def test_subscriber_shall_be_detachable_from_subscriptions(cls):
subscription = "sub msg"
pro = Provider()
sub = Subscriber("sub name", pro)
sub.subscribe(subscription)
cls.assertEqual(len(pro.subscribers[subscription]), 1)
sub.unsubscribe(subscription)
cls.assertEqual(len(pro.subscribers[subscription]), 0)
def test_publisher_shall_append_subscription_message_to_queue(cls):
"""msg_queue ~ Provider.notify(msg) ~ Publisher.publish(msg)"""
expected_msg = "expected msg"
pro = Provider()
pub = Publisher(pro)
Subscriber("sub name", pro)
cls.assertEqual(len(pro.msg_queue), 0)
pub.publish(expected_msg)
cls.assertEqual(len(pro.msg_queue), 1)
cls.assertEqual(pro.msg_queue[0], expected_msg)
def test_provider_shall_update_affected_subscribers_with_published_subscription(
cls,
):
pro = Provider()
pub = Publisher(pro)
sub1 = Subscriber("sub 1 name", pro)
sub1.subscribe("sub 1 msg 1")
sub1.subscribe("sub 1 msg 2")
sub2 = Subscriber("sub 2 name", pro)
sub2.subscribe("sub 2 msg 1")
sub2.subscribe("sub 2 msg 2")
with (
patch.object(sub1, "run") as mock_subscriber1_run,
patch.object(sub2, "run") as mock_subscriber2_run,
):
pro.update()
cls.assertEqual(mock_subscriber1_run.call_count, 0)
cls.assertEqual(mock_subscriber2_run.call_count, 0)
pub.publish("sub 1 msg 1")
pub.publish("sub 1 msg 2")
pub.publish("sub 2 msg 1")
pub.publish("sub 2 msg 2")
with (
patch.object(sub1, "run") as mock_subscriber1_run,
patch.object(sub2, "run") as mock_subscriber2_run,
):
pro.update()
expected_sub1_calls = [call("sub 1 msg 1"), call("sub 1 msg 2")]
mock_subscriber1_run.assert_has_calls(expected_sub1_calls)
expected_sub2_calls = [call("sub 2 msg 1"), call("sub 2 msg 2")]
mock_subscriber2_run.assert_has_calls(expected_sub2_calls)
| TestProvider |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 52015,
"end": 52405
} | class ____(PipesContextLoader):
"""Context loader that reads context from a JSON file on Unity Catalog Volumes."""
@contextmanager
def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:
path = _assert_env_param_type(params, "path", str, self.__class__)
with open(path) as f:
yield json.load(f)
| PipesUnityCatalogVolumesContextLoader |
python | gevent__gevent | src/gevent/events.py | {
"start": 8448,
"end": 9380
} | class ____(Interface):
"""
The event emitted when the memory usage drops below the
threshold after having previously been above it.
This event is emitted only the first time memory usage is detected
to be below the threshold after having previously been above it.
If memory usage climbs again, a `IMemoryUsageThresholdExceeded`
event will be broadcast, and then this event could be broadcast again.
This event is emitted in the monitor thread.
"""
mem_usage = Attribute("The current process memory usage, in bytes.")
max_allowed = Attribute("The maximum allowed memory usage, in bytes.")
max_memory_usage = Attribute("The memory usage that caused the previous "
"IMemoryUsageThresholdExceeded event.")
memory_info = Attribute("The tuple of memory usage stats return by psutil.")
@implementer(IMemoryUsageUnderThreshold)
| IMemoryUsageUnderThreshold |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/main.py | {
"start": 58756,
"end": 59209
} | class ____(type):
"""
The metaclass for YAMLObject.
"""
def __init__(cls, name, bases, kwds):
# type: (Any, Any, Any) -> None
super().__init__(name, bases, kwds)
if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore
cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
| YAMLObjectMetaclass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 992589,
"end": 993970
} | class ____(sgqlc.types.Type):
"""A pointer to a repository at a specific revision embedded inside
another repository.
"""
__schema__ = github_schema
__field_names__ = ("branch", "git_url", "name", "name_raw", "path", "path_raw", "subproject_commit_oid")
branch = sgqlc.types.Field(String, graphql_name="branch")
"""The branch of the upstream submodule for tracking updates"""
git_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="gitUrl")
"""The git URL of the submodule repository"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the submodule in .gitmodules"""
name_raw = sgqlc.types.Field(sgqlc.types.non_null(Base64String), graphql_name="nameRaw")
"""The name of the submodule in .gitmodules (Base64-encoded)"""
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
"""The path in the superproject that this submodule is located in"""
path_raw = sgqlc.types.Field(sgqlc.types.non_null(Base64String), graphql_name="pathRaw")
"""The path in the superproject that this submodule is located in
(Base64-encoded)
"""
subproject_commit_oid = sgqlc.types.Field(GitObjectID, graphql_name="subprojectCommitOid")
"""The commit revision of the subproject repository being tracked by
the submodule
"""
| Submodule |
python | PrefectHQ__prefect | tests/cli/test_work_pool.py | {
"start": 1906,
"end": 16150
} | class ____:
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_work_pool(self, prefect_client):
pool_name = "my-pool"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} -t fake",
)
assert res.exit_code == 0
assert f"Created work pool {pool_name!r}" in res.output
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.name == pool_name
assert client_res.base_job_template == FAKE_DEFAULT_BASE_JOB_TEMPLATE
assert isinstance(client_res, WorkPool)
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_work_pool_with_base_job_template(
self, prefect_client, mock_collection_registry
):
pool_name = "my-olympic-pool"
with temporary_settings({PREFECT_UI_URL: MOCK_PREFECT_UI_URL}):
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"work-pool",
"create",
pool_name,
"--type",
"process",
"--base-job-template",
Path(__file__).parent
/ "base-job-templates"
/ "process-worker.json",
],
expected_code=0,
expected_output_contains=[
"Created work pool 'my-olympic-pool'",
"/work-pools/work-pool/",
],
)
client_res = await prefect_client.read_work_pool(pool_name)
assert isinstance(client_res, WorkPool)
assert client_res.name == pool_name
assert client_res.base_job_template == {
"job_configuration": {"command": "{{ command }}", "name": "{{ name }}"},
"variables": {
"properties": {
"command": {
"description": "Command to run.",
"title": "Command",
"type": "string",
},
"name": {
"description": "Description.",
"title": "Name",
"type": "string",
},
},
"type": "object",
},
}
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_work_pool_with_empty_name(
self, prefect_client, mock_collection_registry
):
await run_sync_in_worker_thread(
invoke_and_assert,
"work-pool create '' -t process",
expected_code=1,
expected_output_contains=["name cannot be empty"],
)
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_work_pool_name_conflict(
self, prefect_client, mock_collection_registry
):
pool_name = "my-pool"
await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} -t process",
expected_code=0,
expected_output_contains=[f"Created work pool {pool_name!r}"],
)
await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} -t process",
expected_code=1,
expected_output_contains=[
f"Work pool named {pool_name!r} already exists. Use --overwrite to update it."
],
)
@pytest.mark.usefixtures("mock_collection_registry")
async def test_default_template(self, prefect_client):
pool_name = "my-pool"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} -t fake",
)
assert res.exit_code == 0
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.base_job_template == FAKE_DEFAULT_BASE_JOB_TEMPLATE
@pytest.mark.usefixtures("mock_collection_registry")
async def test_default_paused(self, prefect_client):
pool_name = "my-pool"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} -t process",
)
assert res.exit_code == 0
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.is_paused is False
@pytest.mark.usefixtures("mock_collection_registry")
async def test_paused_true(self, prefect_client):
pool_name = "my-pool"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --paused -t process",
)
assert res.exit_code == 0
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.is_paused is True
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_work_pool_from_registry(self, prefect_client):
pool_name = "fake-work"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --type fake",
)
assert res.exit_code == 0
assert f"Created work pool {pool_name!r}" in res.output
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.name == pool_name
assert client_res.base_job_template == FAKE_DEFAULT_BASE_JOB_TEMPLATE
assert client_res.type == "fake"
assert isinstance(client_res, WorkPool)
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_process_work_pool(self, prefect_client):
pool_name = "process-work"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --type process",
)
assert res.exit_code == 0
assert f"Created work pool {pool_name!r}" in res.output
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.name == pool_name
assert (
client_res.base_job_template
== ProcessWorker.get_default_base_job_template()
)
assert client_res.type == "process"
assert isinstance(client_res, WorkPool)
@pytest.mark.usefixtures("mock_collection_registry")
def test_create_with_unsupported_type(self, monkeypatch):
invoke_and_assert(
["work-pool", "create", "my-pool", "--type", "unsupported"],
expected_code=1,
expected_output_contains=(
"Unknown work pool type 'unsupported'. Please choose from "
),
)
@pytest.mark.usefixtures("mock_collection_registry")
def test_create_non_interactive_missing_args(self):
invoke_and_assert(
["work-pool", "create", "no-type"],
expected_code=1,
expected_output=(
"When not using an interactive terminal, you must supply a `--type`"
" value."
),
)
@pytest.mark.usefixtures("interactive_console", "mock_collection_registry")
async def test_create_interactive_first_type(self, prefect_client):
work_pool_name = "test-interactive"
await run_sync_in_worker_thread(
invoke_and_assert,
["work-pool", "create", work_pool_name],
expected_code=0,
user_input=readchar.key.ENTER,
expected_output_contains=[f"Created work pool {work_pool_name!r}"],
)
client_res = await prefect_client.read_work_pool(work_pool_name)
assert client_res.name == work_pool_name
assert client_res.type == "fake"
assert isinstance(client_res, WorkPool)
@pytest.mark.usefixtures("interactive_console", "mock_collection_registry")
async def test_create_interactive_second_type(self, prefect_client):
work_pool_name = "test-interactive"
await run_sync_in_worker_thread(
invoke_and_assert,
["work-pool", "create", work_pool_name],
expected_code=0,
user_input=readchar.key.DOWN + readchar.key.ENTER,
expected_output_contains=[f"Created work pool {work_pool_name!r}"],
)
client_res = await prefect_client.read_work_pool(work_pool_name)
assert client_res.name == work_pool_name
assert client_res.type == "cloud-run:push"
assert isinstance(client_res, WorkPool)
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_set_as_default(self, prefect_client):
settings_context = get_settings_context()
assert (
settings_context.profile.settings.get(PREFECT_DEFAULT_WORK_POOL_NAME)
is None
)
pool_name = "my-pool"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} -t process --set-as-default",
expected_output_contains=[
f"Created work pool {pool_name!r}",
(
f"Set {pool_name!r} as default work pool for profile"
f" {settings_context.profile.name!r}\n"
),
],
)
assert res.exit_code == 0
assert f"Created work pool {pool_name!r}" in res.output
client_res = await prefect_client.read_work_pool(pool_name)
assert client_res.name == pool_name
assert isinstance(client_res, WorkPool)
# reload the profile to pick up change
profile = load_profile(settings_context.profile.name)
assert profile.settings.get(PREFECT_DEFAULT_WORK_POOL_NAME) == pool_name
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_with_provision_infra(self, monkeypatch):
mock_provision = AsyncMock()
class MockProvisioner:
def __init__(self):
self._console = None
@property
def console(self):
return self._console
@console.setter
def console(self, value):
self._console = value
async def provision(self, *args, **kwargs):
await mock_provision(*args, **kwargs)
return FAKE_DEFAULT_BASE_JOB_TEMPLATE
monkeypatch.setattr(
"prefect.infrastructure.provisioners.get_infrastructure_provisioner_for_work_pool_type",
lambda *args: MockProvisioner(),
)
pool_name = "fake-work"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --type fake --provision-infra",
)
assert res.exit_code == 0
assert mock_provision.await_count == 1
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_with_provision_infra_unsupported(self):
pool_name = "fake-work"
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --type fake --provision-infra",
)
assert res.exit_code == 0
assert (
"Automatic infrastructure provisioning is not supported for 'fake' work"
" pools." in res.output
)
@pytest.mark.usefixtures("interactive_console", "mock_collection_registry")
async def test_create_prompt_table_only_displays_push_pool_types_using_provision_infra_flag(
self, prefect_client, monkeypatch
):
mock_provision = AsyncMock()
class MockProvisioner:
def __init__(self):
self._console = None
@property
def console(self):
return self._console
@console.setter
def console(self, value):
self._console = value
async def provision(self, *args, **kwargs):
await mock_provision(*args, **kwargs)
return FAKE_DEFAULT_BASE_JOB_TEMPLATE
monkeypatch.setattr(
"prefect.infrastructure.provisioners.get_infrastructure_provisioner_for_work_pool_type",
lambda *args: MockProvisioner(),
)
await run_sync_in_worker_thread(
invoke_and_assert,
["work-pool", "create", "test-interactive", "--provision-infra"],
expected_code=0,
user_input=readchar.key.ENTER,
expected_output_contains=[
"What type of work pool infrastructure would you like to use?",
"Prefect Cloud Run: Push",
],
expected_output_does_not_contain=[
"Prefect Fake",
"Prefect Agent",
],
)
@pytest.mark.usefixtures("mock_collection_registry")
async def test_create_work_pool_with_overwrite(self, prefect_client):
pool_name = "overwrite-pool"
# Create initial work pool
await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --type process",
expected_code=0,
expected_output_contains=[f"Created work pool {pool_name!r}"],
)
initial_pool = await prefect_client.read_work_pool(pool_name)
assert initial_pool.name == pool_name
assert not initial_pool.is_paused
# Attempt to overwrite the work pool (updating is_paused)
await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --paused --overwrite",
expected_code=0,
expected_output_contains=[f"Updated work pool {pool_name!r}"],
)
updated_pool = await prefect_client.read_work_pool(pool_name)
assert updated_pool.name == pool_name
assert updated_pool.id == initial_pool.id
assert updated_pool.is_paused
await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool create {pool_name} --paused",
expected_code=1,
expected_output_contains=[
f"Work pool named {pool_name!r} already exists. Use --overwrite to update it."
],
)
| TestCreate |
python | facebook__pyre-check | client/libcst_vendored_visitors/_apply_type_annotations.py | {
"start": 3933,
"end": 6348
} | class ____:
"""
Represents all of the annotation information we might add to
a class:
- All data is keyed on the qualified name relative to the module root
- The ``functions`` field also keys on the signature so that we
do not apply stub types where the signature is incompatible.
The idea is that
- ``functions`` contains all function and method type
information from the stub, and the qualifier for a method includes
the containing class names (e.g. "Cat.meow")
- ``attributes`` similarly contains all globals
and class-level attribute type information.
- The ``class_definitions`` field contains all of the classes
defined in the stub. Most of these classes will be ignored in
downstream logic (it is *not* used to annotate attributes or
method), but there are some cases like TypedDict where a
typing-only class needs to be injected.
- The field ``typevars`` contains the assign statement for all
type variables in the stub, and ``names`` tracks
all of the names used in annotations; together these fields
tell us which typevars should be included in the codemod
(all typevars that appear in annotations.)
"""
# TODO: consider simplifying this in a few ways:
# - We could probably just inject all typevars, used or not.
# It doesn't seem to me that our codemod needs to act like
# a linter checking for unused names.
# - We could probably decide which classes are typing-only
# in the visitor rather than the codemod, which would make
# it easier to reason locally about (and document) how the
# class_definitions field works.
functions: Dict[FunctionKey, FunctionAnnotation]
attributes: Dict[str, cst.Annotation]
class_definitions: Dict[str, cst.ClassDef]
typevars: Dict[str, cst.Assign]
names: Set[str]
@classmethod
def empty(cls) -> "Annotations":
return Annotations({}, {}, {}, {}, set())
def update(self, other: "Annotations") -> None:
self.functions.update(other.functions)
self.attributes.update(other.attributes)
self.class_definitions.update(other.class_definitions)
self.typevars.update(other.typevars)
self.names.update(other.names)
def finish(self) -> None:
self.typevars = {k: v for k, v in self.typevars.items() if k in self.names}
| Annotations |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 42764,
"end": 44842
} | class ____(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(
self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None
):
super().__init__()
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
@compile_compatible_method_lru_cache(maxsize=1)
def forward(
self,
shape: torch.Size,
device: Union[torch.device, str],
dtype: torch.dtype,
mask: Optional[Tensor] = None,
) -> Tensor:
if mask is None:
mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
not_mask = (~mask).to(dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
# Modified from transformers.models.detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention
| Mask2FormerSinePositionEmbedding |
python | numba__numba | numba/core/dispatcher.py | {
"start": 45575,
"end": 49307
} | class ____(LiftedCode):
can_cache = True
def _reduce_extras(self):
return dict(output_types=self.output_types)
@property
def _numba_type_(self):
return types.Dispatcher(self)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
*args* and *kws* types. This enables the resolving of the return type.
A (template, pysig, args, kws) tuple is returned.
"""
# Ensure an overload is available
if self._can_compile:
self.compile(tuple(args))
pysig = None
# Create function type for typing
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
# The `key` isn't really used except for diagnosis here,
# so avoid keeping a reference to `cfunc`.
call_template = typing.make_concrete_template(
name, key=func_name, signatures=self.nopython_signatures)
return call_template, pysig, args, kws
def compile(self, sig):
# this is similar to LiftedLoop's compile but does not have the
# "fallback" to object mode part.
with ExitStack() as scope:
cres = None
def cb_compiler(dur):
if cres is not None:
self._callback_add_compiler_timer(dur, cres)
def cb_llvm(dur):
if cres is not None:
self._callback_add_llvm_timer(dur, cres)
scope.enter_context(ev.install_timer("numba:compiler_lock",
cb_compiler))
scope.enter_context(ev.install_timer("numba:llvm_lock", cb_llvm))
scope.enter_context(global_compiler_lock)
# Use counter to track recursion compilation depth
with self._compiling_counter:
# XXX this is mostly duplicated from Dispatcher.
flags = self.flags
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exists
# (e.g. if another thread compiled it before we got the lock)
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
self._pre_compile(args, return_type, flags)
# Clone IR to avoid (some of the) mutation in the rewrite pass
cloned_func_ir = self.func_ir.copy()
ev_details = dict(
dispatcher=self,
args=args,
return_type=return_type,
)
with ev.trigger_event("numba:compile", data=ev_details):
cres = compiler.compile_ir(typingctx=self.typingctx,
targetctx=self.targetctx,
func_ir=cloned_func_ir,
args=args,
return_type=return_type,
flags=flags, locals=self.locals,
lifted=(),
lifted_from=self.lifted_from,
is_lifted_loop=True,)
# Check typing error if object mode is used
if (cres.typing_error is not None and
not flags.enable_pyobject):
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
| LiftedWith |
python | google__jax | docs/autodidax.py | {
"start": 75181,
"end": 87104
} | class ____(Trace):
def new_arg(self, pval: PartialVal) -> Any:
return PartialEvalTracer(self, pval, LambdaBindingRecipe())
def lift(self, val: Any) -> PartialEvalTracer:
return PartialEvalTracer(self, PartialVal.known(val), None)
pure = lift
def instantiate_const(self, tracer: PartialEvalTracer) -> PartialEvalTracer:
if tracer.pval.is_unknown:
return tracer
else:
pval = PartialVal.unknown(raise_to_shaped(tracer.aval))
return PartialEvalTracer(self, pval, ConstRecipe(tracer.pval.const))
def process_primitive(self, primitive, tracers, params):
if all(t.pval.is_known for t in tracers):
return bind(primitive, *map(full_lower, tracers), **params)
rule = partial_eval_rules.get(primitive)
if rule: return rule(self, tracers, **params)
tracers_in = [self.instantiate_const(t) for t in tracers]
avals_in = [t.aval for t in tracers_in]
avals_out = abstract_eval_rules[primitive](*avals_in, **params)
tracers_out = [PartialEvalTracer(self, PartialVal.unknown(aval), None)
for aval in avals_out]
eqn = JaxprEqnRecipe(primitive, tracers_in, params, avals_out,
map(ref, tracers_out))
for t in tracers_out: t.recipe = eqn
return tracers_out
partial_eval_rules = {}
# -
# Now that we can build graph representations of jaxprs with `PartialEvalTrace`,
# we need a mechanism to convert the graph representation to a standard jaxpr.
# The jaxpr corresponds to a topological sort of the graph.
# +
def tracers_to_jaxpr(tracers_in: list[PartialEvalTracer],
tracers_out: list[PartialEvalTracer]):
tracer_to_var: dict[int, Var] = {id(t): Var(raise_to_shaped(t.aval))
for t in tracers_in}
constvar_to_val: dict[int, Any] = {}
constid_to_var: dict[int, Var] = {}
processed_eqns: set[int] = set()
eqns: list[JaxprEqn] = []
for t in toposort(tracers_out, tracer_parents):
if isinstance(t.recipe, LambdaBindingRecipe):
assert id(t) in set(map(id, tracers_in))
elif isinstance(t.recipe, ConstRecipe):
val = t.recipe.val
var = constid_to_var.get(id(val))
if var is None:
aval = raise_to_shaped(get_aval(val))
var = constid_to_var[id(val)] = Var(aval)
constvar_to_val[var] = val
tracer_to_var[id(t)] = var
elif isinstance(t.recipe, JaxprEqnRecipe):
if id(t.recipe) not in processed_eqns:
eqns.append(recipe_to_eqn(tracer_to_var, t.recipe))
processed_eqns.add(id(t.recipe))
else:
raise TypeError(t.recipe)
constvars, constvals = unzip2(constvar_to_val.items())
in_binders = constvars + [tracer_to_var[id(t)] for t in tracers_in]
out_vars = [tracer_to_var[id(t)] for t in tracers_out]
jaxpr = Jaxpr(in_binders, eqns, out_vars)
typecheck_jaxpr(jaxpr)
return jaxpr, constvals
def recipe_to_eqn(tracer_to_var: dict[int, Var], recipe: JaxprEqnRecipe
) -> JaxprEqn:
inputs = [tracer_to_var[id(t)] for t in recipe.tracers_in]
out_binders = [Var(aval) for aval in recipe.avals_out]
for t_ref, var in zip(recipe.tracer_refs_out, out_binders):
if t_ref() is not None: tracer_to_var[id(t_ref())] = var
return JaxprEqn(recipe.prim, inputs, recipe.params, out_binders)
def tracer_parents(t: PartialEvalTracer) -> list[PartialEvalTracer]:
return t.recipe.tracers_in if isinstance(t.recipe, JaxprEqnRecipe) else []
# + tags=["hide-input"]
def toposort(out_nodes: list[Any], parents: Callable[[Any], list[Any]]):
if not out_nodes: return []
out_nodes = remove_duplicates(out_nodes)
child_counts = {}
stack = list(out_nodes)
while stack:
node = stack.pop()
if id(node) in child_counts:
child_counts[id(node)] += 1
else:
child_counts[id(node)] = 1
stack.extend(parents(node))
for node in out_nodes:
child_counts[id(node)] -= 1
sorted_nodes = []
childless_nodes = [node for node in out_nodes if not child_counts[id(node)]]
while childless_nodes:
node = childless_nodes.pop()
sorted_nodes.append(node)
for parent in parents(node):
if child_counts[id(parent)] == 1:
childless_nodes.append(parent)
else:
child_counts[id(parent)] -= 1
sorted_nodes = sorted_nodes[::-1]
check_toposort(sorted_nodes, parents)
return sorted_nodes
def remove_duplicates(lst):
seen = set()
return [x for x in lst if id(x) not in seen and not seen.add(id(x))]
def check_toposort(nodes: list[Any], parents: Callable[[Any], list[Any]]):
seen = set()
for node in nodes:
assert all(id(parent) in seen for parent in parents(node))
seen.add(id(node))
# -
# Now we can linearize!
y, sin_lin = linearize(sin, 3.)
print(y, sin(3.))
print(sin_lin(1.), cos(3.))
# To handle `linearize`-of-`jit`, we still need to write a partial evaluation
# rule for `xla_call_p`. Other than tracer bookkeeping, the main task is to
# perform partial evaluation of a jaxpr, 'unzipping' it into two jaxprs.
#
# There are actually two rules to write: one for trace-time partial evaluation,
# which we'll call `xla_call_partial_eval`, and one for partial evaluation of
# jaxprs, which we'll call `xla_call_peval_eqn`.
# +
def xla_call_partial_eval(trace, tracers, *, jaxpr, num_consts):
del num_consts # Unused
in_unknowns = [not t.pval.is_known for t in tracers]
jaxpr1, jaxpr2, out_unknowns, num_res = partial_eval_jaxpr(jaxpr, in_unknowns)
known_tracers, unknown_tracers = partition_list(in_unknowns, tracers)
known_vals = [t.pval.const for t in known_tracers]
outs1_res = bind(xla_call_p, *known_vals, jaxpr=jaxpr1, num_consts=0)
outs1, res = split_list(outs1_res, len(jaxpr1.outs) - num_res)
res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res]
outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None)
for v in jaxpr2.outs]
eqn = JaxprEqnRecipe(xla_call_p, res_tracers + unknown_tracers,
dict(jaxpr=jaxpr2, num_consts=0),
[v.aval for v in jaxpr2.outs], map(ref, outs2))
for t in outs2: t.recipe = eqn
return merge_lists(out_unknowns, outs1, outs2)
partial_eval_rules[xla_call_p] = xla_call_partial_eval
def partial_eval_jaxpr(jaxpr: Jaxpr, in_unknowns: list[bool],
instantiate: list[bool] | None = None,
) -> tuple[Jaxpr, Jaxpr, list[bool], int]:
env: dict[Var, bool] = {}
residuals: set[Var] = set()
def read(x: Atom) -> bool:
return type(x) is Var and env[x]
def write(unk: bool, v: Var) -> None:
env[v] = unk
def new_res(x: Atom) -> Atom:
if type(x) is Var: residuals.add(x)
return x
eqns1, eqns2 = [], []
map(write, in_unknowns, jaxpr.in_binders)
for eqn in jaxpr.eqns:
unks_in = map(read, eqn.inputs)
rule = partial_eval_jaxpr_rules.get(eqn.primitive)
if rule:
eqn1, eqn2, unks_out, res = rule(unks_in, eqn)
eqns1.append(eqn1); eqns2.append(eqn2); residuals.update(res)
map(write, unks_out, eqn.out_binders)
elif any(unks_in):
inputs = [v if unk else new_res(v) for unk, v in zip(unks_in, eqn.inputs)]
eqns2.append(JaxprEqn(eqn.primitive, inputs, eqn.params, eqn.out_binders))
map(partial(write, True), eqn.out_binders)
else:
eqns1.append(eqn)
map(partial(write, False), eqn.out_binders)
out_unknowns = map(read, jaxpr.outs)
if instantiate is not None:
for v, uk, inst in zip(jaxpr.outs, out_unknowns, instantiate):
if inst and not uk: new_res(v)
out_unknowns = map(op.or_, out_unknowns, instantiate)
residuals, num_res = list(residuals), len(residuals)
assert all(type(v) is Var for v in residuals), residuals
ins1, ins2 = partition_list(in_unknowns, jaxpr.in_binders)
outs1, outs2 = partition_list(out_unknowns, jaxpr.outs)
jaxpr1 = Jaxpr(ins1, eqns1, outs1 + residuals)
jaxpr2 = Jaxpr(residuals + ins2, eqns2, outs2)
typecheck_partial_eval_jaxpr(jaxpr, in_unknowns, out_unknowns, jaxpr1, jaxpr2)
return jaxpr1, jaxpr2, out_unknowns, num_res
def typecheck_partial_eval_jaxpr(jaxpr, unks_in, unks_out, jaxpr1, jaxpr2):
jaxprty = typecheck_jaxpr(jaxpr) # (a1, a2) -> (b1, b2 )
jaxpr1ty = typecheck_jaxpr(jaxpr1) # a1 -> (b1, res)
jaxpr2ty = typecheck_jaxpr(jaxpr2) # (res, a2) -> b2
a1, a2 = partition_list(unks_in, jaxprty.in_types)
b1, b2 = partition_list(unks_out, jaxprty.out_types)
b1_, res = split_list(jaxpr1ty.out_types, len(b1))
res_, a2_ = split_list(jaxpr2ty.in_types, len(res))
b2_ = jaxpr2ty.out_types
if jaxpr1ty.in_types != a1: raise TypeError
if jaxpr2ty.out_types != b2: raise TypeError
if b1 != b1_: raise TypeError
if res != res_: raise TypeError
if a2 != a2_: raise TypeError
if b2 != b2_: raise TypeError
partial_eval_jaxpr_rules = {}
def xla_call_peval_eqn(unks_in: list[bool], eqn: JaxprEqn,
) -> tuple[JaxprEqn, JaxprEqn, list[bool], list[Var]]:
jaxpr = eqn.params['jaxpr']
jaxpr1, jaxpr2, unks_out, num_res = partial_eval_jaxpr(jaxpr, unks_in)
ins1, ins2 = partition_list(unks_in, eqn.inputs)
out_binders1, out_binders2 = partition_list(unks_out, eqn.out_binders)
residuals = [Var(v.aval) for v in jaxpr2.in_binders[:num_res]]
eqn1 = JaxprEqn(xla_call_p, ins1, dict(jaxpr=jaxpr1, num_consts=0),
out_binders1 + residuals)
eqn2 = JaxprEqn(xla_call_p, residuals + ins2,
dict(jaxpr=jaxpr2, num_consts=0), out_binders2)
return eqn1, eqn2, unks_out, residuals
partial_eval_jaxpr_rules[xla_call_p] = xla_call_peval_eqn
# -
# With that, we can compose `linearize` and `jit` however we like:
# +
@jit
def f(x):
y = sin(x) * 2.
z = - y + x
return z
y, f_lin = linearize(f, 3.)
y_dot = f_lin(1.)
print(y, y_dot)
# +
@jit
def f(x):
y = sin(x) * 2.
z = g(x, y)
return z
@jit
def g(x, y):
return cos(x) + y
y, f_lin = linearize(f, 3.)
y_dot = f_lin(1.)
print(y, y_dot)
# -
# ### `vjp` and `grad`
#
# The `vjp` transformation works a lot like linearize. Its type signature is
# analogous:
#
# ```
# linearize : (a -> b) -> a -> (b, T a -o T b)
# vjp : (a -> b) -> a -> (b, T b -o T a)
# ```
#
# The only difference is that we transpose the linear part of the computation
# before returning it, so that it goes from type `T a -o T b` to type `T b -o T
# a`. That is, we'll implement `vjp` as, essentially,
#
# ```
# def vjp(f, x):
# y, f_lin = linearize(f, x)
# f_vjp = lambda y_bar: transpose(f_lin)(y_bar)
# return y, f_vjp
# ```
#
# Since we have the linear computation as a jaxpr, not just a Python callable,
# we can implement the transpose transformation as a jaxpr interpreter.
# +
def vjp_flat(f, *primals_in):
pvals_in = ([PartialVal.known(x) for x in primals_in] +
[PartialVal.unknown(vspace(get_aval(x))) for x in primals_in])
primal_pvals_in, tangent_pvals_in = split_half(pvals_in)
def f_jvp(*primals_tangents_in):
primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in))
return [*primals_out, *tangents_out]
jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in) # linearize
primal_pvals, _ = split_half(pvals_out)
assert all(pval.is_known for pval in primal_pvals)
primals_out = [pval.const for pval in primal_pvals]
transpose_inputs = consts + [UndefPrimal(p.aval) for p in tangent_pvals_in]
f_vjp = lambda *cts: eval_jaxpr_transposed(jaxpr, transpose_inputs, cts)
return primals_out, f_vjp
def vjp(f, *primals_in):
primals_in_flat, in_tree = tree_flatten(primals_in)
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, f_vjp_flat = vjp_flat(f, *primals_in_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
def f_vjp(*cotangents_out):
cotangents_out_flat, _ = tree_flatten(cotangents_out)
cotangents_in_flat = f_vjp_flat(*cotangents_out_flat)
return tree_unflatten(in_tree, cotangents_in_flat)
return primals_out, f_vjp
| PartialEvalTrace |
python | apache__airflow | providers/postgres/tests/unit/postgres/dialects/test_postgres.py | {
"start": 996,
"end": 4574
} | class ____:
def setup_method(self):
def get_records(sql, parameters):
assert isinstance(sql, str)
assert "hollywood" in parameters, "Missing 'schema' in parameters"
assert "actors" in parameters, "Missing 'table' in parameters"
if "kcu." in sql:
return [("id",)]
return [
("id", None, "NO", None, "ALWAYS", "YES"),
("name", None, "YES", None, "NEVER", "NO"),
("firstname", None, "YES", None, "NEVER", "NO"),
("age", None, "YES", None, "NEVER", "NO"),
]
self.test_db_hook = MagicMock(placeholder="?", spec=DbApiHook)
self.test_db_hook.get_records.side_effect = get_records
self.test_db_hook.insert_statement_format = "INSERT INTO {} {} VALUES ({})"
self.test_db_hook.escape_word_format = '"{}"'
self.test_db_hook.escape_column_names = False
def test_placeholder(self):
assert PostgresDialect(self.test_db_hook).placeholder == "?"
def test_get_column_names(self):
assert PostgresDialect(self.test_db_hook).get_column_names("hollywood.actors") == [
"id",
"name",
"firstname",
"age",
]
def test_get_target_fields(self):
assert PostgresDialect(self.test_db_hook).get_target_fields("hollywood.actors") == [
"name",
"firstname",
"age",
]
def test_get_primary_keys(self):
assert PostgresDialect(self.test_db_hook).get_primary_keys("hollywood.actors") == ["id"]
def test_generate_replace_sql(self):
values = [
{"id": 1, "name": "Stallone", "firstname": "Sylvester", "age": "78"},
{"id": 2, "name": "Statham", "firstname": "Jason", "age": "57"},
{"id": 3, "name": "Li", "firstname": "Jet", "age": "61"},
{"id": 4, "name": "Lundgren", "firstname": "Dolph", "age": "66"},
{"id": 5, "name": "Norris", "firstname": "Chuck", "age": "84"},
]
target_fields = ["id", "name", "firstname", "age"]
sql = PostgresDialect(self.test_db_hook).generate_replace_sql(
"hollywood.actors", values, target_fields
)
assert (
sql
== """
INSERT INTO hollywood.actors (id, name, firstname, age) VALUES (?,?,?,?,?) ON CONFLICT (id) DO UPDATE SET name = excluded.name, firstname = excluded.firstname, age = excluded.age
""".strip()
)
def test_generate_replace_sql_when_escape_column_names_is_enabled(self):
values = [
{"id": 1, "name": "Stallone", "firstname": "Sylvester", "age": "78"},
{"id": 2, "name": "Statham", "firstname": "Jason", "age": "57"},
{"id": 3, "name": "Li", "firstname": "Jet", "age": "61"},
{"id": 4, "name": "Lundgren", "firstname": "Dolph", "age": "66"},
{"id": 5, "name": "Norris", "firstname": "Chuck", "age": "84"},
]
target_fields = ["id", "name", "firstname", "age"]
self.test_db_hook.escape_column_names = True
sql = PostgresDialect(self.test_db_hook).generate_replace_sql(
"hollywood.actors", values, target_fields
)
assert (
sql
== """
INSERT INTO hollywood.actors ("id", "name", "firstname", "age") VALUES (?,?,?,?,?) ON CONFLICT ("id") DO UPDATE SET "name" = excluded."name", "firstname" = excluded."firstname", "age" = excluded."age"
""".strip()
)
| TestPostgresDialect |
python | getsentry__sentry | src/sentry/users/services/user/model.py | {
"start": 3958,
"end": 4060
} | class ____(IntEnum): # annoying
SIMPLE = 0
DETAILED = 1
SELF_DETAILED = 2
| UserSerializeType |
python | pexpect__pexpect | tests/deprecated_test_filedescriptor.py | {
"start": 1026,
"end": 2734
} | class ____(PexpectTestCase.PexpectTestCase):
def setUp(self):
print(self.id())
PexpectTestCase.PexpectTestCase.setUp(self)
def test_fd (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn (fd)
s.expect ('This is the end of test data:')
s.expect (pexpect.EOF)
assert s.before == ' END\n'
def test_maxread (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn (fd)
s.maxread = 100
s.expect('2')
s.expect ('This is the end of test data:')
s.expect (pexpect.EOF)
assert s.before == ' END\n'
def test_fd_isalive (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn (fd)
assert s.isalive()
os.close (fd)
assert not s.isalive()
def test_fd_isatty (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn (fd)
assert not s.isatty()
os.close(fd)
### def test_close_does_not_close_fd (self):
### '''Calling close() on a pexpect.spawn object should not
### close the underlying file descriptor.
### '''
### fd = os.open ('TESTDATA.txt', os.O_RDONLY)
### s = pexpect.spawn (fd)
### try:
### s.close()
### self.fail('Expected an Exception.')
### except pexpect.ExceptionPexpect, e:
### pass
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(ExpectTestCase)
#fout = open('delete_me_1','wb')
#fout.write(the_old_way)
#fout.close
#fout = open('delete_me_2', 'wb')
#fout.write(the_new_way)
#fout.close
| ExpectTestCase |
python | donnemartin__interactive-coding-challenges | linked_lists/palindrome/test_palindrome.py | {
"start": 18,
"end": 1291
} | class ____(unittest.TestCase):
def test_palindrome(self):
print('Test: Empty list')
linked_list = MyLinkedList()
self.assertEqual(linked_list.is_palindrome(), False)
print('Test: Single element list')
head = Node(1)
linked_list = MyLinkedList(head)
self.assertEqual(linked_list.is_palindrome(), False)
print('Test: Two element list, not a palindrome')
linked_list.append(2)
self.assertEqual(linked_list.is_palindrome(), False)
print('Test: General case: Palindrome with even length')
head = Node('a')
linked_list = MyLinkedList(head)
linked_list.append('b')
linked_list.append('b')
linked_list.append('a')
self.assertEqual(linked_list.is_palindrome(), True)
print('Test: General case: Palindrome with odd length')
head = Node(1)
linked_list = MyLinkedList(head)
linked_list.append(2)
linked_list.append(3)
linked_list.append(2)
linked_list.append(1)
self.assertEqual(linked_list.is_palindrome(), True)
print('Success: test_palindrome')
def main():
test = TestPalindrome()
test.test_palindrome()
if __name__ == '__main__':
main()
| TestPalindrome |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 13326,
"end": 16049
} | class ____(Data2VecVisionSelfAttention):
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
if output_attentions:
logger.warning_once(
f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
"be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
)
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
attn_bias = None
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attn_bias = self.relative_position_bias(
window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
# Add shared relative position bias if provided.
if relative_position_bias is not None:
if attn_bias is None:
attn_bias = relative_position_bias
else:
attn_bias += relative_position_bias
scaling = 1 / math.sqrt(self.attention_head_size)
context_layer = torch.nn.functional.scaled_dot_product_attention(
query_layer,
key_layer,
value_layer,
attn_mask=attn_bias,
dropout_p=self.config.attention_probs_dropout_prob if self.training else 0.0,
is_causal=False,
scale=scaling,
)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, None
# Copied from transformers.models.beit.modeling_beit.BeitSelfOutput with Beit->Data2VecVision
| Data2VecVisionSdpaSelfAttention |
python | spack__spack | lib/spack/spack/traverse.py | {
"start": 2484,
"end": 3095
} | class ____:
"""A visitor that traverses each node once."""
def __init__(self, visitor, key=id, visited=None):
self.visitor = visitor
self.key = key
self.visited = set() if visited is None else visited
def accept(self, item):
# Covering nodes means: visit nodes once and only once.
key = self.key(item.edge.spec)
if key in self.visited:
return False
accept = self.visitor.accept(item)
self.visited.add(key)
return accept
def neighbors(self, item):
return self.visitor.neighbors(item)
| CoverNodesVisitor |
python | django__django | tests/introspection/models.py | {
"start": 272,
"end": 416
} | class ____(models.Model):
city = models.ForeignKey(City, models.CASCADE, primary_key=True)
name = models.CharField(max_length=50)
| District |
python | FactoryBoy__factory_boy | examples/flask_alchemy/demoapp_factories.py | {
"start": 370,
"end": 537
} | class ____(BaseFactory):
class Meta:
model = demoapp.UserLog
message = factory.fuzzy.FuzzyText()
user = factory.SubFactory(UserFactory)
| UserLogFactory |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 20721,
"end": 20856
} | class ____:
"""Contains parsed declaration of shared utility function"""
name: str
ret: str
params: str
| SharedFunctionDecl |
python | falconry__falcon | falcon/asgi/request.py | {
"start": 1515,
"end": 35852
} | class ____(request.Request):
"""Represents a client's HTTP request.
Note:
`Request` is not meant to be instantiated directly by responders.
Args:
scope (dict): ASGI HTTP connection scope passed in from the server (see
also: `Connection Scope`_).
receive (awaitable): ASGI awaitable callable that will yield a new
event dictionary when one is available.
Keyword Args:
first_event (dict): First ASGI event received from the client,
if one was preloaded (default ``None``).
options (falcon.request.RequestOptions): Set of global request options
passed from the App handler.
.. _Connection Scope:
https://asgi.readthedocs.io/en/latest/specs/www.html#connection-scope
"""
__slots__ = [
'_asgi_headers',
# '_asgi_server_cached',
# '_cached_headers',
'_first_event',
'_receive',
# '_stream',
'scope',
]
# PERF(vytas): These boilerplates values will be shadowed when set on an
# instance. Avoiding a statement per each of those values allows to speed
# up __init__ substantially.
_asgi_server_cached: tuple[str, int] | None = None
_cached_access_route: list[str] | None = None
_cached_forwarded: list[Forwarded] | None = None
_cached_forwarded_prefix: str | None = None
_cached_forwarded_uri: str | None = None
_cached_headers: dict[str, str] | None = None
# NOTE: _cached_headers_lower is not used
_cached_prefix: str | None = None
_cached_relative_uri: str | None = None
_cached_uri: str | None = None
_media: UnsetOr[Any] = _UNSET
_media_error: Exception | None = None
_stream: BoundedStream | None = None
scope: dict[str, Any]
"""Reference to the ASGI HTTP connection scope passed in
from the server (see also: `Connection Scope`_).
.. _Connection Scope:
https://asgi.readthedocs.io/en/latest/specs/www.html#connection-scope
"""
is_websocket: bool
"""Set to ``True`` IFF this request was made as part of a WebSocket handshake."""
def __init__(
self,
scope: dict[str, Any],
receive: AsgiReceive,
first_event: AsgiEvent | None = None,
options: request.RequestOptions | None = None,
):
# =====================================================================
# Prepare headers
# =====================================================================
req_headers: dict[bytes, bytes] = {}
for header_name, header_value in scope['headers']:
# NOTE(kgriffs): According to ASGI 3.0, header names are always
# lowercased, and both name and value are byte strings. Although
# technically header names and values are restricted to US-ASCII
# we decode later (just-in-time) using the default 'utf-8' because
# it is a little faster than passing an encoding option (except
# under Cython).
#
# The reason we wait to decode is that the typical app will not
# need to decode all request headers, and we usually can just
# leave the header name as a byte string and look it up that way.
#
# NOTE(kgriffs): There are no standard request headers that
# allow multiple instances to appear in the request while also
# disallowing list syntax.
if (
header_name not in req_headers
or header_name in _SINGLETON_HEADERS_BYTESTR
):
req_headers[header_name] = header_value
else:
req_headers[header_name] += b',' + header_value
self._asgi_headers: dict[bytes, bytes] = req_headers
# PERF(vytas): Fall back to class variable(s) when unset.
# self._cached_headers = None
# =====================================================================
# Misc.
# =====================================================================
# PERF(vytas): Fall back to class variable(s) when unset.
# self._asgi_server_cached = None # Lazy
self.scope = scope
self.is_websocket = scope['type'] == 'websocket'
self.options = options if options is not None else request.RequestOptions()
self.method = 'GET' if self.is_websocket else scope['method']
self.uri_template = None
# PERF(vytas): Fall back to class variable(s) when unset.
# self._media = _UNSET
# self._media_error = None
# TODO(kgriffs): ASGI does not specify whether 'path' may be empty,
# as was allowed for WSGI.
path = scope['path'] or '/'
if (
self.options.strip_url_path_trailing_slash
and len(path) != 1
and path.endswith('/')
):
self.path = path[:-1]
else:
self.path = path
query_string = scope['query_string'].decode()
self.query_string = query_string
if query_string:
self._params = parse_query_string(
query_string,
keep_blank=self.options.keep_blank_qs_values,
csv=self.options.auto_parse_qs_csv,
)
else:
self._params = {}
# PERF(vytas): Fall back to class variable(s) when unset.
# self._cached_access_route = None
# self._cached_forwarded = None
# self._cached_forwarded_prefix = None
# self._cached_forwarded_uri = None
# self._cached_prefix = None
# self._cached_relative_uri = None
# self._cached_uri = None
if self.method == 'GET':
# NOTE(vytas): We do not really expect the Content-Type to be
# non-ASCII, however we assume ISO-8859-1 here for maximum
# compatibility with WSGI.
# PERF(kgriffs): Normally we expect no Content-Type header, so
# use this pattern which is a little bit faster than dict.get()
if b'content-type' in req_headers:
self.content_type = req_headers[b'content-type'].decode('latin1')
else:
self.content_type = None
else:
# PERF(kgriffs): This is the most performant pattern when we expect
# the key to be present most of the time.
try:
self.content_type = req_headers[b'content-type'].decode('latin1')
except KeyError:
self.content_type = None
# =====================================================================
# The request body stream is created lazily
# =====================================================================
# NOTE(kgriffs): The ASGI spec states that "you should not trigger
# on a connection opening alone". I take this to mean that the app
# should have the opportunity to respond with a 401, for example,
# without having to first read any of the body. This is accomplished
# in Falcon by only reading the first data event when the app attempts
# to read from req.stream for the first time, and in uvicorn
# (for example) by not confirming a 100 Continue request unless
# the app calls receive() to read the request body.
# PERF(vytas): Fall back to class variable(s) when unset.
# self._stream = None
self._receive: AsgiReceive = receive
self._first_event: AsgiEvent | None = first_event
# =====================================================================
# Create a context object
# =====================================================================
self.context = self.context_type()
# ------------------------------------------------------------------------
# Properties
#
# Much of the logic from the ASGI Request class is duplicated in these
# property implementations; however, to make the code more DRY we would
# have to factor out the common logic, which would add overhead to these
# properties and slow them down. They are simple enough that we should
# be able to keep them in sync with the WSGI side without too much
# trouble.
# ------------------------------------------------------------------------
auth: str | None = asgi_helpers._header_property('Authorization')
expect: str | None = asgi_helpers._header_property('Expect')
if_range: str | None = asgi_helpers._header_property('If-Range')
referer: str | None = asgi_helpers._header_property('Referer')
user_agent: str | None = asgi_helpers._header_property('User-Agent')
@property
def accept(self) -> str:
# NOTE(kgriffs): Per RFC, a missing accept header is
# equivalent to '*/*'
try:
return self._asgi_headers[b'accept'].decode('latin1') or '*/*'
except KeyError:
return '*/*'
@property
def content_length(self) -> int | None:
try:
value = self._asgi_headers[b'content-length']
except KeyError:
return None
try:
# PERF(vytas): int() also works with a bytestring argument.
value_as_int = int(value)
except ValueError:
# PERF(vytas): Check for an empty value in the except clause,
# because we do not expect ASGI servers to inject any headers
# that the client did not provide.
# NOTE(kgriffs): Normalize an empty value to behave as if
# the header were not included; wsgiref, at least, inserts
# an empty CONTENT_LENGTH value if the request does not
# set the header. Gunicorn and uWSGI do not do this, but
# others might if they are trying to match wsgiref's
# behavior too closely.
if not value:
return None
msg = 'The value of the header must be a number.'
raise errors.HTTPInvalidHeader(msg, 'Content-Length')
if value_as_int < 0:
msg = 'The value of the header must be a positive number.'
raise errors.HTTPInvalidHeader(msg, 'Content-Length')
return value_as_int
@property
def stream(self) -> BoundedStream: # type: ignore[override]
"""File-like input object for reading the body of the request, if any."""
if self.is_websocket:
raise errors.UnsupportedError(
'ASGI does not support reading the WebSocket handshake request body.'
)
if not self._stream:
self._stream = BoundedStream(
self._receive,
first_event=self._first_event,
content_length=self.content_length,
)
return self._stream
# NOTE(kgriffs): This is provided as an alias in order to ease migration
# from WSGI, but is not documented since we do not want people using
# it in greenfield ASGI apps.
@property
def bounded_stream(self) -> BoundedStream: # type: ignore[override]
"""Alias to :attr:`~.stream`."""
return self.stream
@property
def root_path(self) -> str:
# PERF(kgriffs): try...except is faster than get() assuming that
# we normally expect the key to exist. Even though ASGI 3.0
# allows servers to omit the key when the value is an
# empty string, at least uvicorn still includes it explicitly in
# that case.
try:
return self.scope['root_path']
except KeyError:
pass
return ''
@property
# NOTE(caselit): Deprecated long ago. Warns since 4.0.
@deprecation.deprecated(
'Use `root_path` instead. '
'(This compatibility alias will be removed in Falcon 5.0.)',
is_property=True,
)
def app(self) -> str:
"""Deprecated alias for :attr:`root_path`."""
return self.root_path
@property
def scheme(self) -> str:
"""URL scheme used for the request.
One of ``'http'``, ``'https'``, ``'ws'``, or ``'wss'``. Defaults to ``'http'``
for the ``http`` scope, or ``'ws'`` for the ``websocket`` scope, when
the ASGI server does not include the scheme in the connection scope.
Note:
If the request was proxied, the scheme may not
match what was originally requested by the client.
:attr:`forwarded_scheme` can be used, instead,
to handle such cases.
"""
# PERF(kgriffs): Use try...except because we normally expect the
# key to be present.
try:
return self.scope['scheme']
except KeyError:
pass
return 'ws' if self.is_websocket else 'http'
@property
def forwarded_scheme(self) -> str:
# PERF(kgriffs): Since the Forwarded header is still relatively
# new, we expect X-Forwarded-Proto to be more common, so
# try to avoid calling self.forwarded if we can, since it uses a
# try...catch that will usually result in a relatively expensive
# raised exception.
if b'forwarded' in self._asgi_headers:
forwarded = self.forwarded
if forwarded:
# Use first hop, fall back on own scheme
scheme = forwarded[0].scheme or self.scheme
else:
scheme = self.scheme
else:
# PERF(kgriffs): This call should normally succeed, so
# just go for it without wasting time checking it
# first. Note also that the indexing operator is
# slightly faster than using get().
try:
scheme = (
self._asgi_headers[b'x-forwarded-proto'].decode('latin1').lower()
)
except KeyError:
scheme = self.scheme
return scheme
@property
def host(self) -> str:
"""Host request header field, if present.
If the Host header is missing, this attribute resolves to the ASGI server's
listening host name or IP address.
"""
try:
# NOTE(kgriffs): Prefer the host header; the web server
# isn't supposed to mess with it, so it should be what
# the client actually sent.
host_header = self._asgi_headers[b'host'].decode('latin1')
host, __ = parse_host(host_header)
except KeyError:
host, __ = self._asgi_server
return host
@property
def forwarded_host(self) -> str:
# PERF(kgriffs): Since the Forwarded header is still relatively
# new, we expect X-Forwarded-Host to be more common, so
# try to avoid calling self.forwarded if we can, since it uses a
# try...catch that will usually result in a relatively expensive
# raised exception.
if b'forwarded' in self._asgi_headers:
forwarded = self.forwarded
if forwarded:
# Use first hop, fall back on self
host = forwarded[0].host or self.netloc
else:
host = self.netloc
else:
# PERF(kgriffs): This call should normally succeed, assuming
# that the caller is expecting a forwarded header, so
# just go for it without wasting time checking it
# first.
try:
host = self._asgi_headers[b'x-forwarded-host'].decode('latin1')
except KeyError:
host = self.netloc
return host
@property
def access_route(self) -> list[str]:
"""IP address of the original client (if known), as
well as any known addresses of proxies fronting the ASGI server.
The following request headers are checked, in order of
preference, to determine the addresses:
- ``Forwarded``
- ``X-Forwarded-For``
- ``X-Real-IP``
In addition, the value of the "client" field from the ASGI
connection scope will be appended to the end of the list if
not already included in one of the above headers. If the
"client" field is not available, it will default to
``'127.0.0.1'``.
Note:
Per `RFC 7239`_, the access route may contain "unknown"
and obfuscated identifiers, in addition to IPv4 and
IPv6 addresses
.. _RFC 7239: https://tools.ietf.org/html/rfc7239
Warning:
Headers can be forged by any client or proxy. Use this
property with caution and validate all values before
using them. Do not rely on the access route to authorize
requests!
""" # noqa: D205
if self._cached_access_route is None:
# PERF(kgriffs): 'client' is optional according to the ASGI spec
# but it will probably be present, hence the try...except.
try:
# NOTE(kgriffs): The ASGI spec states that this can be
# any iterable. So we need to read and cache it in
# case the iterable is forward-only. But that is
# effectively what we are doing since we only ever
# access this field when setting self._cached_access_route
client, __ = self.scope['client']
# NOTE(vytas): Uvicorn may explicitly set scope['client'] to None.
# According to the spec, it does default to None when missing,
# but it is unclear whether it can be explicitly set to None, or
# it must be a valid iterable when present. In any case, we
# simply catch TypeError here too to account for this scenario.
except (KeyError, TypeError):
# NOTE(kgriffs): Default to localhost so that app logic does
# note have to special-case the handling of a missing
# client field in the connection scope. This should be
# a reasonable default, but we can change it later if
# that turns out not to be the case.
client = '127.0.0.1'
headers = self._asgi_headers
if b'forwarded' in headers:
self._cached_access_route = []
for hop in self.forwarded or ():
if hop.src is not None:
host, __ = parse_host(hop.src)
self._cached_access_route.append(host)
elif b'x-forwarded-for' in headers:
addresses = headers[b'x-forwarded-for'].decode('latin1').split(',')
self._cached_access_route = [ip.strip() for ip in addresses]
elif b'x-real-ip' in headers:
self._cached_access_route = [headers[b'x-real-ip'].decode('latin1')]
if self._cached_access_route:
if self._cached_access_route[-1] != client:
self._cached_access_route.append(client)
else:
self._cached_access_route = [client] if client else []
return self._cached_access_route
@property
def remote_addr(self) -> str:
"""IP address of the closest known client or proxy to
the ASGI server, or ``'127.0.0.1'`` if unknown.
This property's value is equivalent to the last element of the
:attr:`~.access_route` property.
""" # noqa: D205
route = self.access_route
return route[-1]
@property
def port(self) -> int:
try:
host_header = self._asgi_headers[b'host'].decode('latin1')
default_port = 443 if self._secure_scheme else 80
__, port = parse_host(host_header, default_port=default_port)
except KeyError:
__, port = self._asgi_server
return port
@property
def netloc(self) -> str:
# PERF(kgriffs): try..except is faster than get() when we
# expect the key to be present most of the time.
try:
netloc_value = self._asgi_headers[b'host'].decode('latin1')
except KeyError:
netloc_value, port = self._asgi_server
if self._secure_scheme:
if port != 443:
netloc_value = f'{netloc_value}:{port}'
else:
if port != 80:
netloc_value = f'{netloc_value}:{port}'
return netloc_value
async def get_media(self, default_when_empty: UnsetOr[Any] = _UNSET) -> Any:
"""Return a deserialized form of the request stream.
The first time this method is called, the request stream will be
deserialized using the Content-Type header as well as the media-type
handlers configured via :class:`falcon.RequestOptions`. The result will
be cached and returned in subsequent calls::
deserialized_media = await req.get_media()
If the matched media handler raises an error while attempting to
deserialize the request body, the exception will propagate up
to the caller.
See also :ref:`media` for more information regarding media handling.
Note:
When ``get_media`` is called on a request with an empty body,
Falcon will let the media handler try to deserialize the body
and will return the value returned by the handler or propagate
the exception raised by it. To instead return a different value
in case of an exception by the handler, specify the argument
``default_when_empty``.
Warning:
This operation will consume the request stream the first time
it's called and cache the results. Follow-up calls will just
retrieve a cached version of the object.
Args:
default_when_empty: Fallback value to return when there is no body
in the request and the media handler raises an error
(like in the case of the default JSON media handler).
By default, Falcon uses the value returned by the media handler
or propagates the raised exception, if any.
This value is not cached, and will be used only for the current
call.
Returns:
media (object): The deserialized media representation.
"""
if self._media is not _UNSET:
return self._media
if self._media_error is not None:
if default_when_empty is not _UNSET and isinstance(
self._media_error, errors.MediaNotFoundError
):
return default_when_empty
raise self._media_error
handler, _, deserialize_sync = self.options.media_handlers._resolve(
self.content_type, self.options.default_media_type
)
try:
if deserialize_sync:
self._media = deserialize_sync(await self.stream.read())
else:
self._media = await handler.deserialize_async(
self.stream, self.content_type, self.content_length
)
except errors.MediaNotFoundError as err:
self._media_error = err
if default_when_empty is not _UNSET:
return default_when_empty
raise
except Exception as err:
self._media_error = err
raise
finally:
if handler.exhaust_stream:
await self.stream.exhaust()
return self._media
media: Awaitable[Any] = cast(Awaitable[Any], property(get_media))
"""An awaitable property that acts as an alias for
:meth:`~.get_media`. This can be used to ease the porting of
a WSGI app to ASGI, although the ``await`` keyword must still be
added when referencing the property::
deserialized_media = await req.media
"""
@property
def if_match(self) -> list[ETag | Literal['*']] | None:
# TODO(kgriffs): It may make sense at some point to create a
# header property generator that DRY's up the memoization
# pattern for us.
if self._cached_if_match is _UNSET:
header_value = self._asgi_headers.get(b'if-match')
if header_value:
self._cached_if_match = helpers._parse_etags(
header_value.decode('latin1')
)
else:
self._cached_if_match = None
return self._cached_if_match
@property
def if_none_match(self) -> list[ETag | Literal['*']] | None:
if self._cached_if_none_match is _UNSET:
header_value = self._asgi_headers.get(b'if-none-match')
if header_value:
self._cached_if_none_match = helpers._parse_etags(
header_value.decode('latin1')
)
else:
self._cached_if_none_match = None
return self._cached_if_none_match
@property
def headers(self) -> Mapping[str, str]:
"""Raw HTTP headers from the request with dash-separated
names normalized to lowercase.
Note:
This property differs from the WSGI version of ``Request.headers``
in that the latter returns *uppercase* names for historical
reasons. Middleware, such as tracing and logging components, that
need to be compatible with both WSGI and ASGI apps should
use :attr:`headers_lower` instead.
Warning:
Parsing all the headers to create this dict is done the first
time this attribute is accessed, and the returned object should
be treated as read-only. Note that this parsing can be costly,
so unless you need all the headers in this format, you should
instead use the ``get_header()`` method or one of the
convenience attributes to get a value for a specific header.
""" # noqa: D205
# NOTE(kgriffs: First time here will cache the dict so all we
# have to do is clone it in the future.
if self._cached_headers is None:
self._cached_headers = {
name.decode('latin1'): value.decode('latin1')
for name, value in self._asgi_headers.items()
}
return self._cached_headers
@property
def headers_lower(self) -> Mapping[str, str]:
"""Alias for :attr:`headers` provided to expose a uniform way to
get lowercased headers for both WSGI and ASGI apps.
""" # noqa: D205
return self.headers
# ------------------------------------------------------------------------
# Public Methods
# ------------------------------------------------------------------------
@overload
def get_header(
self, name: str, required: Literal[True], default: str | None = ...
) -> str: ...
@overload
def get_header(self, name: str, required: bool = ..., *, default: str) -> str: ...
@overload
def get_header(
self, name: str, required: bool = False, default: str | None = ...
) -> str | None: ...
# PERF(kgriffs): Using kwarg cache, in lieu of @lru_cache on a helper method
# that is then called from get_header(), was benchmarked to be more
# efficient across CPython 3.6/3.8 (regardless of cythonization) and
# PyPy 3.6.
# TODO(vytas): Verify whether this is still the case on 3.12+.
def get_header(
self,
name: str,
required: bool = False,
default: str | None = None,
_name_cache: dict[str, bytes] = {},
) -> str | None:
"""Retrieve the raw string value for the given header.
Args:
name (str): Header name, case-insensitive (e.g., 'Content-Type')
Keyword Args:
required (bool): Set to ``True`` to raise
``HTTPBadRequest`` instead of returning gracefully when the
header is not found (default ``False``).
default (any): Value to return if the header
is not found (default ``None``).
Returns:
str: The value of the specified header if it exists, or
the default value if the header is not found and is not
required.
Raises:
HTTPBadRequest: The header was not found in the request, but
it was required.
"""
try:
asgi_name = _name_cache[name]
except KeyError:
asgi_name = name.lower().encode('latin1')
if len(_name_cache) < 64: # Somewhat arbitrary ceiling to mitigate abuse
_name_cache[name] = asgi_name
# Use try..except to optimize for the header existing in most cases
try:
# Don't take the time to cache beforehand, using HTTP naming.
# This will be faster, assuming that most headers are looked
# up only once, and not all headers will be requested.
return self._asgi_headers[asgi_name].decode('latin1')
except KeyError:
if not required:
return default
raise errors.HTTPMissingHeader(name)
@overload
def get_param(
self,
name: str,
required: Literal[True],
store: StoreArg = ...,
default: str | None = ...,
) -> str: ...
@overload
def get_param(
self,
name: str,
required: bool = ...,
store: StoreArg = ...,
*,
default: str,
) -> str: ...
@overload
def get_param(
self,
name: str,
required: bool = False,
store: StoreArg = None,
default: str | None = None,
) -> str | None: ...
def get_param(
self,
name: str,
required: bool = False,
store: StoreArg = None,
default: str | None = None,
) -> str | None:
"""Return the raw value of a query string parameter as a string.
Note:
If an HTML form is POSTed to the API using the
*application/x-www-form-urlencoded* media type, Falcon can
automatically parse the parameters from the request body via
:meth:`~falcon.asgi.Request.get_media`.
See also: :ref:`access_urlencoded_form`
Note:
Similar to the way multiple keys in form data are handled, if a
query parameter is included in the query string multiple times,
only one of those values will be returned, and it is undefined which
one. This caveat also applies when
:attr:`~falcon.RequestOptions.auto_parse_qs_csv` is enabled and the
given parameter is assigned to a comma-separated list of values
(e.g., ``foo=a,b,c``).
When multiple values are expected for a parameter,
:meth:`~.get_param_as_list` can be used to retrieve all of
them at once.
Args:
name (str): Parameter name, case-sensitive (e.g., 'sort').
Keyword Args:
required (bool): Set to ``True`` to raise
``HTTPBadRequest`` instead of returning ``None`` when the
parameter is not found (default ``False``).
store (dict): A ``dict``-like object in which to place
the value of the param, but only if the param is present.
default (any): If the param is not found returns the
given value instead of ``None``
Returns:
str: The value of the param as a string, or ``None`` if param is
not found and is not required.
Raises:
HTTPBadRequest: A required param is missing from the request.
"""
# TODO(kgriffs): It seems silly to have to do this, simply to provide
# the ASGI-specific docstring above. Is there a better way?
return super().get_param(name, required=required, store=store, default=default)
@property
def env(self) -> NoReturn: # type:ignore[override]
"""The env property is not available in ASGI. Use :attr:`~.store` instead."""
raise AttributeError(
'The env property is not available in ASGI. Use :attr:`~.store` instead'
)
def log_error(self, message: str) -> NoReturn:
"""Write a message to the server's log.
Warning:
Although this method is inherited from the WSGI Request class, it is
not supported for ASGI apps. Please use the standard library logging
framework instead.
"""
# NOTE(kgriffs): Normally the Pythonic thing to do would be to simply
# set this method to None so that it can't even be called, but we
# raise an error here to help people who are porting from WSGI.
raise NotImplementedError(
"ASGI does not support writing to the server's log. "
'Please use the standard library logging framework '
'instead.'
)
# ------------------------------------------------------------------------
# Private Helpers
# ------------------------------------------------------------------------
@property
def _asgi_server(self) -> tuple[str, int]:
if not self._asgi_server_cached:
try:
# NOTE(kgriffs): Since the ASGI spec states that 'server'
# can be any old iterable, we have to be careful to only
# read it once and cache the result in case the
# iterator is forward-only (not likely, but better
# safe than sorry).
self._asgi_server_cached = tuple(self.scope['server'])
except (KeyError, TypeError):
# NOTE(kgriffs): Not found, or was None
default_port = 443 if self._secure_scheme else 80
self._asgi_server_cached = ('localhost', default_port)
return self._asgi_server_cached
@property
def _secure_scheme(self) -> bool:
return self.scheme == 'https' or self.scheme == 'wss'
| Request |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_servicehook_stats.py | {
"start": 174,
"end": 1010
} | class ____(APITestCase):
def test_simple(self) -> None:
project = self.create_project()
hook = ServiceHook.objects.get_or_create(
project_id=project.id, actor_id=self.user.id, url="http://example.com"
)[0]
self.login_as(user=self.user)
path = (
f"/api/0/projects/{project.organization.slug}/{project.slug}/hooks/{hook.guid}/stats/"
)
tsdb.backend.incr(TSDBModel.servicehook_fired, hook.id, count=3)
response = self.client.get(path)
assert response.status_code == 200
assert response.status_code == 200, response.content
assert response.data[-1]["total"] == 3, response.data
for point in response.data[:-1]:
assert point["total"] == 0
assert len(response.data) == 24
| ProjectServiceHookStatsTest |
python | scipy__scipy | scipy/cluster/vq.py | {
"start": 3757,
"end": 30899
} | class ____(Exception):
pass
@xp_capabilities()
def whiten(obs, check_finite=None):
"""
Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set by its standard deviation (i.e. "whiten"
it - as in "white noise" where each frequency has equal power).
Each feature is divided by its standard deviation across all observations
to give it unit variance.
Parameters
----------
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation::
# f0 f1 f2
obs = [[ 1., 1., 1.], #o0
[ 2., 2., 2.], #o1
[ 3., 3., 3.], #o2
[ 4., 4., 4.]] #o3
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True for eager backends and False for lazy ones.
Returns
-------
result : ndarray
Contains the values in `obs` scaled by the standard deviation
of each column.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import whiten
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7,]])
>>> whiten(features)
array([[ 4.17944278, 2.69811351, 7.21248917],
[ 3.29956009, 2.93273208, 9.33380951],
[ 1.75976538, 0.7038557 , 7.21248917]])
"""
xp = array_namespace(obs)
if check_finite is None:
check_finite = not is_lazy_array(obs)
obs = _asarray(obs, check_finite=check_finite, xp=xp)
std_dev = xp.std(obs, axis=0)
zero_std_mask = std_dev == 0
std_dev = xpx.at(std_dev, zero_std_mask).set(1.0)
if check_finite and xp.any(zero_std_mask):
warnings.warn("Some columns have standard deviation zero. "
"The values of these columns will not change.",
RuntimeWarning, stacklevel=2)
return obs / std_dev
@xp_capabilities(cpu_only=True, reason="uses spatial.distance.cdist",
jax_jit=False, allow_dask_compute=True)
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code::
# f0 f1 f2 f3
code_book = [[ 1., 2., 3., 4.], #c0
[ 1., 2., 3., 4.], #c1
[ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import vq
>>> code_book = np.array([[1., 1., 1.],
... [2., 2., 2.]])
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7]])
>>> vq(features, code_book)
(array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
"""
xp = array_namespace(obs, code_book)
obs = _asarray(obs, xp=xp, check_finite=check_finite)
code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
ct = xp.result_type(obs, code_book)
if xp.isdtype(ct, kind='real floating'):
c_obs = xp.astype(obs, ct, copy=False)
c_code_book = xp.astype(code_book, ct, copy=False)
c_obs = np.asarray(c_obs)
c_code_book = np.asarray(c_code_book)
result = _vq.vq(c_obs, c_code_book)
return xp.asarray(result[0]), xp.asarray(result[1])
return py_vq(obs, code_book, check_finite=False)
def py_vq(obs, code_book, check_finite=True):
""" Python version of vq algorithm.
The algorithm computes the Euclidean distance between each
observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expects a rank 2 array. Each row is one observation.
code_book : ndarray
Code book to use. Same format than obs. Should have same number of
features (e.g., columns) than obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
code[i] gives the label of the ith obversation; its code is
code_book[code[i]].
mind_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This function is slower than the C version but works for
all input types. If the inputs have the wrong types for the
C versions of the function, this one is called as a last resort.
It is about 20 times slower than the C version.
"""
xp = array_namespace(obs, code_book)
obs = _asarray(obs, xp=xp, check_finite=check_finite)
code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
if obs.ndim != code_book.ndim:
raise ValueError("Observation and code_book should have the same rank")
if obs.ndim == 1:
obs = obs[:, xp.newaxis]
code_book = code_book[:, xp.newaxis]
# Once `cdist` has array API support, this `xp.asarray` call can be removed
dist = xp.asarray(cdist(obs, code_book))
code = xp.argmin(dist, axis=1)
min_dist = xp.min(dist, axis=1)
return code, min_dist
def _kmeans(obs, guess, thresh=1e-5, xp=None):
""" "raw" version of k-means.
Returns
-------
code_book
The lowest distortion codebook found.
avg_dist
The average distance a observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> import numpy as np
>>> from scipy.cluster.vq import _kmeans
>>> features = np.array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = np.array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
xp = np if xp is None else xp
code_book = guess
diff = xp.inf
prev_avg_dists = deque([diff], maxlen=2)
np_obs = np.asarray(obs)
while diff > thresh:
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book, check_finite=False)
prev_avg_dists.append(xp.mean(distort, axis=-1))
# recalc code_book as centroids of associated obs
obs_code = np.asarray(obs_code)
code_book, has_members = _vq.update_cluster_means(np_obs, obs_code,
code_book.shape[0])
code_book = code_book[has_members]
code_book = xp.asarray(code_book)
diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])
return code_book, prev_avg_dists[1]
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
@_transition_to_rng("seed")
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
*, rng=None):
"""
Performs k-means on a set of observation vectors forming k clusters.
The k-means algorithm adjusts the classification of the observations
into clusters and updates the cluster centroids until the position of
the centroids is stable over successive iterations. In this
implementation of the algorithm, the stability of the centroids is
determined by comparing the absolute value of the change in the average
Euclidean distance between the observations and their corresponding
centroids against a threshold. This yields
a code book mapping centroids to codes and vice versa.
Parameters
----------
obs : ndarray
Each row of the M by N array is an observation vector. The
columns are the features seen during each observation.
The features must be whitened first with the `whiten` function.
k_or_guess : int or ndarray
The number of centroids to generate. A code is assigned to
each centroid, which is also the row index of the centroid
in the code_book matrix generated.
The initial k centroids are chosen by randomly selecting
observations from the observation matrix. Alternatively,
passing a k by N array specifies the initial k centroids.
iter : int, optional
The number of times to run k-means, returning the codebook
with the lowest distortion. This argument is ignored if
initial centroids are specified with an array for the
``k_or_guess`` parameter. This parameter does not represent the
number of iterations of the k-means algorithm.
thresh : float, optional
Terminates the k-means algorithm if the change in
distortion since the last k-means iteration is less than
or equal to threshold.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
Returns
-------
codebook : ndarray
A k by N array of k centroids. The ith centroid
codebook[i] is represented with the code i. The centroids
and codes generated represent the lowest distortion seen,
not necessarily the globally minimal distortion.
Note that the number of centroids is not necessarily the same as the
``k_or_guess`` parameter, because centroids assigned to no observations
are removed during iterations.
distortion : float
The mean (non-squared) Euclidean distance between the observations
passed and the centroids generated. Note the difference to the standard
definition of distortion in the context of the k-means algorithm, which
is the sum of the squared distances.
See Also
--------
kmeans2 : a different implementation of k-means clustering
with more methods for generating initial centroids but without
using a distortion change threshold as a stopping criterion.
whiten : must be called prior to passing an observation matrix
to kmeans.
Notes
-----
For more functionalities or optimal performance, you can use
`sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
`This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html#comparison-of-high-performance-implementations>`_
is a benchmark result of several implementations.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import vq, kmeans, whiten
>>> import matplotlib.pyplot as plt
>>> features = np.array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
>>> whitened = whiten(features)
>>> book = np.array((whitened[0],whitened[2]))
>>> kmeans(whitened,book)
(array([[ 2.3110306 , 2.86287398], # random
[ 0.93218041, 1.24398691]]), 0.85684700941625547)
>>> codes = 3
>>> kmeans(whitened,codes)
(array([[ 2.3110306 , 2.86287398], # random
[ 1.32544402, 0.65607529],
[ 0.40782893, 2.02786907]]), 0.5196582527686241)
>>> # Create 50 datapoints in two clusters a and b
>>> pts = 50
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
>>> b = rng.multivariate_normal([30, 10],
... [[10, 2], [2, 1]],
... size=pts)
>>> features = np.concatenate((a, b))
>>> # Whiten data
>>> whitened = whiten(features)
>>> # Find 2 clusters in the data
>>> codebook, distortion = kmeans(whitened, 2)
>>> # Plot whitened data and cluster centers in red
>>> plt.scatter(whitened[:, 0], whitened[:, 1])
>>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
>>> plt.show()
"""
if isinstance(k_or_guess, int):
xp = array_namespace(obs)
else:
xp = array_namespace(obs, k_or_guess)
obs = _asarray(obs, xp=xp, check_finite=check_finite)
guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite)
if iter < 1:
raise ValueError(f"iter must be at least 1, got {iter}")
# Determine whether a count (scalar) or an initial guess (array) was passed.
if xp_size(guess) != 1:
if xp_size(guess) < 1:
raise ValueError(f"Asked for 0 clusters. Initial book was {guess}")
return _kmeans(obs, guess, thresh=thresh, xp=xp)
# k_or_guess is a scalar, now verify that it's an integer
k = int(guess)
if k != guess:
raise ValueError("If k_or_guess is a scalar, it must be an integer.")
if k < 1:
raise ValueError(f"Asked for {k} clusters.")
rng = check_random_state(rng)
# initialize best distance value to a large value
best_dist = xp.inf
for i in range(iter):
# the initial code book is randomly selected from observations
guess = _kpoints(obs, k, rng, xp)
book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp)
if dist < best_dist:
best_book = book
best_dist = dist
return best_book, best_dist
def _kpoints(data, k, rng, xp):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
idx = rng.choice(data.shape[0], size=int(k), replace=False)
# convert to array with default integer dtype (avoids numpy#25607)
idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype)
return xp.take(data, idx, axis=0)
def _krandinit(data, k, rng, xp):
"""Returns k samples of a random variable whose parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable whose mean and covariances are the ones estimated from the data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
mu = xp.mean(data, axis=0)
k = np.asarray(k)
if data.ndim == 1:
_cov = xpx.cov(data, xp=xp)
x = rng.standard_normal(size=k)
x = xp.asarray(x)
x *= xp.sqrt(_cov)
elif data.shape[1] > data.shape[0]:
# initialize when the covariance matrix is rank deficient
_, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
x = rng.standard_normal(size=(k, xp_size(s)))
x = xp.asarray(x)
sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.))
x = x @ sVh
else:
_cov = xpx.atleast_nd(xpx.cov(data.T, xp=xp), ndim=2, xp=xp)
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = rng.standard_normal(size=(k, xp_size(mu)))
x = xp.asarray(x)
x = x @ xp.linalg.cholesky(_cov).T
x += mu
return x
def _kpp(data, k, rng, xp):
""" Picks k points in the data based on the kmeans++ method.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
init : ndarray
A 'k' by 'N' containing the initial centroids.
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
"""
ndim = len(data.shape)
if ndim == 1:
data = data[:, None]
dims = data.shape[1]
init = xp.empty((int(k), dims))
for i in range(k):
if i == 0:
data_idx = rng_integers(rng, data.shape[0])
else:
D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
probs = D2/D2.sum()
cumprobs = probs.cumsum()
r = rng.uniform()
cumprobs = np.asarray(cumprobs)
data_idx = int(np.searchsorted(cumprobs, r))
init = xpx.at(init)[i, :].set(data[data_idx, :])
if ndim == 1:
init = init[:, 0]
return init
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmeans with a different initialization.",
stacklevel=3)
def _missing_raise():
"""Raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
@_transition_to_rng("seed")
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True, *, rng=None):
"""
Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' 1-D observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If `minit` initialization string is
'matrix', or if a ndarray is given instead, it is
interpreted as initial cluster to use instead.
iter : int, optional
Number of iterations of the k-means algorithm to run. Note
that this differs in meaning from the iters parameter to
the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', '++' and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'++': choose k observations accordingly to the kmeans++ method
(careful seeding)
'matrix': interpret the k parameter as a k by M (or length k
array for 1-D data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
'raise': raise an ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
ith observation is closest to.
See Also
--------
kmeans
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
Examples
--------
>>> from scipy.cluster.vq import kmeans2
>>> import matplotlib.pyplot as plt
>>> import numpy as np
Create z, an array with shape (100, 2) containing a mixture of samples
from three multivariate normal distributions.
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
>>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
>>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
>>> z = np.concatenate((a, b, c))
>>> rng.shuffle(z)
Compute three clusters.
>>> centroid, label = kmeans2(z, 3, minit='points')
>>> centroid
array([[ 2.22274463, -0.61666946], # may vary
[ 0.54069047, 5.86541444],
[ 6.73846769, 4.01991898]])
How many points are in each cluster?
>>> counts = np.bincount(label)
>>> counts
array([29, 51, 20]) # may vary
Plot the clusters.
>>> w0 = z[label == 0]
>>> w1 = z[label == 1]
>>> w2 = z[label == 2]
>>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
>>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
>>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
>>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
>>> plt.axis('equal')
>>> plt.legend(shadow=True)
>>> plt.show()
"""
if int(iter) < 1:
raise ValueError(f"Invalid iter ({iter}), must be a positive integer.")
try:
miss_meth = _valid_miss_meth[missing]
except KeyError as e:
raise ValueError(f"Unknown missing method {missing!r}") from e
if isinstance(k, int):
xp = array_namespace(data)
else:
xp = array_namespace(data, k)
data = _asarray(data, xp=xp, check_finite=check_finite)
code_book = xp_copy(k, xp=xp)
if data.ndim == 1:
d = 1
elif data.ndim == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 is not supported.")
if xp_size(data) < 1 or xp_size(code_book) < 1:
raise ValueError("Empty input is not supported.")
# If k is not a single value, it should be compatible with data's shape
if minit == 'matrix' or xp_size(code_book) > 1:
if data.ndim != code_book.ndim:
raise ValueError("k array doesn't match data rank")
nc = code_book.shape[0]
if data.ndim > 1 and code_book.shape[1] != d:
raise ValueError("k array doesn't match data dimension")
else:
nc = int(code_book)
if nc < 1:
raise ValueError(
f"Cannot ask kmeans2 for {nc} clusters (k was {code_book})"
)
elif nc != code_book:
warnings.warn("k was not an integer, was converted.", stacklevel=2)
try:
init_meth = _valid_init_meth[minit]
except KeyError as e:
raise ValueError(f"Unknown init method {minit!r}") from e
else:
rng = check_random_state(rng)
code_book = init_meth(data, code_book, rng, xp)
data = np.asarray(data)
code_book = np.asarray(code_book)
for _ in range(iter):
# Compute the nearest neighbor for each obs using the current code book
label = vq(data, code_book, check_finite=check_finite)[0]
# Update the code book by computing centroids
new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
miss_meth()
# Set the empty clusters to their previous positions
new_code_book[~has_members] = code_book[~has_members]
code_book = new_code_book
return xp.asarray(code_book), xp.asarray(label)
| ClusterError |
python | crytic__slither | slither/tools/mutator/mutators/LOR.py | {
"start": 298,
"end": 1917
} | class ____(AbstractMutator): # pylint: disable=too-few-public-methods
NAME = "LOR"
HELP = "Logical Operator Replacement"
def _mutate(self) -> Dict:
result: Dict = {}
for ( # pylint: disable=too-many-nested-blocks
function
) in self.contract.functions_and_modifiers_declared:
for node in function.nodes:
if not self.should_mutate_node(node):
continue
for ir in node.irs:
if isinstance(ir, Binary) and ir.type in logical_operators:
alternative_ops = logical_operators[:]
alternative_ops.remove(ir.type)
for op in alternative_ops:
# Get the string
start = node.source_mapping.start
stop = start + node.source_mapping.length
old_str = node.source_mapping.content
line_no = node.source_mapping.lines
# Replace the expression with true
new_str = f"{old_str.split(ir.type.value)[0]} {op.value} {old_str.split(ir.type.value)[1]}"
create_patch_with_line(
result,
self.in_file,
start,
stop,
old_str,
new_str,
line_no[0],
)
return result
| LOR |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 17905,
"end": 20965
} | class ____(TaskRunUniversalTransform):
"""
Releases any concurrency slots held by a run upon exiting a Running or
Cancelling state.
"""
async def after_transition(
self,
context: OrchestrationContext[orm_models.TaskRun, core.TaskRunPolicy],
) -> None:
if self.nullified_transition():
return
if context.validated_state and context.validated_state.type not in [
states.StateType.RUNNING,
states.StateType.CANCELLING,
]:
v2_names = [f"tag:{tag}" for tag in context.run.tags]
v2_limits = await concurrency_limits_v2.bulk_read_concurrency_limits(
context.session, names=v2_names
)
# Release V2 leases for this task run
if v2_limits:
lease_storage = get_concurrency_lease_storage()
lease_ids_to_reconcile: set[UUID] = set()
for v2_limit in v2_limits:
# Find holders for this limit
holders_with_leases: list[
tuple[UUID, ConcurrencyLeaseHolder]
] = await lease_storage.list_holders_for_limit(
limit_id=v2_limit.id,
)
# Find leases that belong to this task run
for lease_id, holder in holders_with_leases:
if holder.id == context.run.id:
lease_ids_to_reconcile.add(lease_id)
# Reconcile all found leases
for lease_id in lease_ids_to_reconcile:
try:
lease = await lease_storage.read_lease(
lease_id=lease_id,
)
if lease:
await concurrency_limits_v2.bulk_decrement_active_slots(
session=context.session,
concurrency_limit_ids=lease.resource_ids,
slots=lease.metadata.slots if lease.metadata else 1,
)
await lease_storage.revoke_lease(
lease_id=lease.id,
)
else:
logger.warning(f"Lease {lease_id} not found during release")
except Exception:
logger.warning(
f"Failed to reconcile lease {lease_id} during release",
exc_info=True,
)
v1_limits = (
await concurrency_limits.filter_concurrency_limits_for_orchestration(
context.session, tags=context.run.tags
)
)
for cl in v1_limits:
active_slots = set(cl.active_slots)
active_slots.discard(str(context.run.id))
cl.active_slots = list(active_slots)
| ReleaseTaskConcurrencySlots |
python | tornadoweb__tornado | tornado/httputil.py | {
"start": 28706,
"end": 36138
} | class ____(ObjectDict):
"""Represents a file uploaded via a form.
For backwards compatibility, its instance attributes are also
accessible as dictionary keys.
* ``filename``
* ``body``
* ``content_type``
"""
filename: str
body: bytes
content_type: str
def _parse_request_range(
range_header: str,
) -> Optional[Tuple[Optional[int], Optional[int]]]:
"""Parses a Range header.
Returns either ``None`` or tuple ``(start, end)``.
Note that while the HTTP headers use inclusive byte positions,
this method returns indexes suitable for use in slices.
>>> start, end = _parse_request_range("bytes=1-2")
>>> start, end
(1, 3)
>>> [0, 1, 2, 3, 4][start:end]
[1, 2]
>>> _parse_request_range("bytes=6-")
(6, None)
>>> _parse_request_range("bytes=-6")
(-6, None)
>>> _parse_request_range("bytes=-0")
(None, 0)
>>> _parse_request_range("bytes=")
(None, None)
>>> _parse_request_range("foo=42")
>>> _parse_request_range("bytes=1-2,6-10")
Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed).
See [0] for the details of the range header.
[0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
"""
unit, _, value = range_header.partition("=")
unit, value = unit.strip(), value.strip()
if unit != "bytes":
return None
start_b, _, end_b = value.partition("-")
try:
start = _int_or_none(start_b)
end = _int_or_none(end_b)
except ValueError:
return None
if end is not None:
if start is None:
if end != 0:
start = -end
end = None
else:
end += 1
return (start, end)
def _get_content_range(start: Optional[int], end: Optional[int], total: int) -> str:
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return f"bytes {start}-{end}/{total}"
def _int_or_none(val: str) -> Optional[int]:
val = val.strip()
if val == "":
return None
return int(val)
def parse_body_arguments(
content_type: str,
body: bytes,
arguments: Dict[str, List[bytes]],
files: Dict[str, List[HTTPFile]],
headers: Optional[HTTPHeaders] = None,
) -> None:
"""Parses a form request body.
Supports ``application/x-www-form-urlencoded`` and
``multipart/form-data``. The ``content_type`` parameter should be
a string and ``body`` should be a byte string. The ``arguments``
and ``files`` parameters are dictionaries that will be updated
with the parsed contents.
"""
if content_type.startswith("application/x-www-form-urlencoded"):
if headers and "Content-Encoding" in headers:
raise HTTPInputError(
"Unsupported Content-Encoding: %s" % headers["Content-Encoding"]
)
try:
# real charset decoding will happen in RequestHandler.decode_argument()
uri_arguments = parse_qs_bytes(body, keep_blank_values=True)
except Exception as e:
raise HTTPInputError("Invalid x-www-form-urlencoded body: %s" % e) from e
for name, values in uri_arguments.items():
if values:
arguments.setdefault(name, []).extend(values)
elif content_type.startswith("multipart/form-data"):
if headers and "Content-Encoding" in headers:
raise HTTPInputError(
"Unsupported Content-Encoding: %s" % headers["Content-Encoding"]
)
try:
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
parse_multipart_form_data(utf8(v), body, arguments, files)
break
else:
raise HTTPInputError("multipart boundary not found")
except Exception as e:
raise HTTPInputError("Invalid multipart/form-data: %s" % e) from e
def parse_multipart_form_data(
boundary: bytes,
data: bytes,
arguments: Dict[str, List[bytes]],
files: Dict[str, List[HTTPFile]],
) -> None:
"""Parses a ``multipart/form-data`` body.
The ``boundary`` and ``data`` parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
.. versionchanged:: 5.1
Now recognizes non-ASCII filenames in RFC 2231/5987
(``filename*=``) format.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b'"') and boundary.endswith(b'"'):
boundary = boundary[1:-1]
final_boundary_index = data.rfind(b"--" + boundary + b"--")
if final_boundary_index == -1:
raise HTTPInputError("Invalid multipart/form-data: no final boundary found")
parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
for part in parts:
if not part:
continue
eoh = part.find(b"\r\n\r\n")
if eoh == -1:
raise HTTPInputError("multipart/form-data missing headers")
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"), _chars_are_bytes=False)
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b"\r\n"):
raise HTTPInputError("Invalid multipart/form-data")
value = part[eoh + 4 : -2]
if not disp_params.get("name"):
raise HTTPInputError("multipart/form-data missing name")
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(
HTTPFile(
filename=disp_params["filename"], body=value, content_type=ctype
)
)
else:
arguments.setdefault(name, []).append(value)
def format_timestamp(
ts: Union[int, float, tuple, time.struct_time, datetime.datetime],
) -> str:
"""Formats a timestamp in the format used by HTTP.
The argument may be a numeric timestamp as returned by `time.time`,
a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
object. Naive `datetime.datetime` objects are assumed to represent
UTC; aware objects are converted to UTC before formatting.
>>> format_timestamp(1359312200)
'Sun, 27 Jan 2013 18:43:20 GMT'
"""
if isinstance(ts, (int, float)):
time_num = ts
elif isinstance(ts, (tuple, time.struct_time)):
time_num = calendar.timegm(ts)
elif isinstance(ts, datetime.datetime):
time_num = calendar.timegm(ts.utctimetuple())
else:
raise TypeError("unknown timestamp type: %r" % ts)
return email.utils.formatdate(time_num, usegmt=True)
| HTTPFile |
python | getsentry__sentry | src/sentry/snuba/sessions_v2.py | {
"start": 19301,
"end": 19388
} | class ____(TypedDict):
id: int
slug: str
stats: list[_CategoryStats]
| _Project |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.