language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1031723,
"end": 1032197
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateIssueComment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "issue_comment")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
issue_comment = sgqlc.types.Field("IssueComment", graphql_name="issueComment")
"""The updated comment."""
|
UpdateIssueCommentPayload
|
python
|
mitmproxy__pdoc
|
pdoc/doc.py
|
{
"start": 45221,
"end": 50372
}
|
class ____(inspect.Signature):
"""
A subclass of `inspect.Signature` that pads __str__ over several lines
for complex signatures.
"""
MULTILINE_CUTOFF = 70
def _params(self) -> list[str]:
# redeclared here to keep code snipped below as-is.
_POSITIONAL_ONLY = inspect.Parameter.POSITIONAL_ONLY
_VAR_POSITIONAL = inspect.Parameter.VAR_POSITIONAL
_KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY
# https://github.com/python/cpython/blob/799f8489d418b7f9207d333eac38214931bd7dcc/Lib/inspect.py#L3083-L3117
# Change: added re.sub() to formatted = ....
# ✂ start ✂
result = []
render_pos_only_separator = False
render_kw_only_separator = True
for param in self.parameters.values():
formatted = str(param)
formatted = _remove_memory_addresses(formatted)
formatted = _remove_collections_abc(formatted)
kind = param.kind
if kind == _POSITIONAL_ONLY:
render_pos_only_separator = True
elif render_pos_only_separator:
# It's not a positional-only parameter, and the flag
# is set to 'True' (there were pos-only params before.)
result.append("/")
render_pos_only_separator = False
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append("*")
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
if render_pos_only_separator:
# There were only positional-only parameters, hence the
# flag was not reset to 'False'
result.append("/")
# ✂ end ✂
return result
def _return_annotation_str(self) -> str:
if self.return_annotation is not empty:
formatted = formatannotation(self.return_annotation)
return _remove_collections_abc(formatted)
else:
return ""
def __str__(self):
result = self._params()
return_annot = self._return_annotation_str()
total_len = sum(len(x) + 2 for x in result) + len(return_annot)
if total_len > self.MULTILINE_CUTOFF:
rendered = "(\n " + ",\n ".join(result) + "\n)"
else:
rendered = "({})".format(", ".join(result))
if return_annot:
rendered += f" -> {return_annot}"
return rendered
def _cut(x: str) -> str:
"""helper function for Doc.__repr__()"""
if len(x) < 20:
return x
else:
return x[:20] + "…"
def _docstr(doc: Doc) -> str:
"""helper function for Doc.__repr__()"""
docstr = []
if doc.is_inherited:
docstr.append(f"inherited from {'.'.join(doc.taken_from).rstrip('.')}")
if doc.docstring:
docstr.append(_cut(doc.docstring))
if docstr:
return f" # {', '.join(docstr)}"
else:
return ""
def _decorators(doc: Class | Function) -> str:
"""helper function for Doc.__repr__()"""
if doc.decorators:
return " ".join(doc.decorators) + " "
else:
return ""
def _children(doc: Namespace) -> str:
children = "\n".join(
repr(x)
for x in doc.members.values()
if not x.name.startswith("_") or x.name == "__init__"
)
if children:
children += "\n"
children = f"\n{textwrap.indent(children, ' ')}"
return children
def _safe_getattr(obj, attr, default):
"""Like `getattr()`, but never raises."""
try:
return getattr(obj, attr, default)
except Exception as e:
warnings.warn(
f"getattr({obj!r}, {attr!r}, {default!r}) raised an exception: {e!r}"
)
return default
def _safe_getdoc(obj: Any) -> str:
"""Like `inspect.getdoc()`, but never raises. Always returns a stripped string."""
try:
doc = inspect.getdoc(obj) or ""
except Exception as e:
warnings.warn(f"inspect.getdoc({obj!r}) raised an exception: {e!r}")
return ""
else:
return doc.strip()
_Enum_default_docstrings = tuple(
{
_safe_getdoc(enum.Enum),
_safe_getdoc(enum.IntEnum),
_safe_getdoc(_safe_getattr(enum, "StrEnum", enum.Enum)),
}
)
def _remove_memory_addresses(x: str) -> str:
"""Remove memory addresses from repr() output"""
return re.sub(r" at 0x[0-9a-fA-F]+(?=>)", "", x)
def _remove_collections_abc(x: str) -> str:
"""Remove 'collections.abc' from type signatures."""
return re.sub(r"(?!\.)\bcollections\.abc\.", "", x)
|
_PrettySignature
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_views.py
|
{
"start": 7585,
"end": 10919
}
|
class ____(TestCase):
"""Tests for search analytics page."""
fixtures = ["eric", "test_data", "test_search_queries"]
def setUp(self):
self.client.login(username="eric", password="test")
self.pip = Project.objects.get(slug="pip")
self.version = self.pip.versions.order_by("id").first()
self.analyics_page = reverse("projects_search_analytics", args=[self.pip.slug])
test_time = timezone.datetime(2019, 8, 2, 12, 0)
self.test_time = timezone.make_aware(test_time)
get(Feature, projects=[self.pip])
def test_top_queries(self):
with mock.patch("django.utils.timezone.now") as test_time:
test_time.return_value = self.test_time
expected_result = [
("hello world", 5, 0),
("documentation", 4, 0),
("read the docs", 4, 0),
("advertising", 3, 0),
("elasticsearch", 2, 0),
("sphinx", 2, 0),
("github", 1, 0),
("hello", 1, 0),
("search", 1, 0),
]
resp = self.client.get(self.analyics_page)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
expected_result,
list(resp.context["queries"]),
)
def test_query_count_of_1_month(self):
with mock.patch("django.utils.timezone.now") as test_time:
test_time.return_value = self.test_time
expected_result_data = [0] * 12 + [1, 1, 2] + [0] * 13 + [4, 3, 7]
resp = self.client.get(self.analyics_page, {"version": self.version.slug})
self.assertEqual(resp.status_code, 200)
self.assertListEqual(
expected_result_data,
resp.context["query_count_of_1_month"]["int_data"],
)
self.assertEqual(
"03 Jul",
resp.context["query_count_of_1_month"]["labels"][0],
)
self.assertEqual(
"02 Aug",
resp.context["query_count_of_1_month"]["labels"][-1],
)
self.assertEqual(
len(resp.context["query_count_of_1_month"]["labels"]),
31,
)
self.assertEqual(
len(resp.context["query_count_of_1_month"]["int_data"]),
31,
)
def test_generated_csv_data(self):
with mock.patch("django.utils.timezone.now") as test_time:
test_time.return_value = self.test_time
resp = self.client.get(
self.analyics_page, {"version": self.version.slug, "download": "true"}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp["Content-Type"], "text/csv")
# convert streaming data to csv format
content = b"".join(resp.streaming_content).splitlines()
content = [line.decode("utf-8") for line in content]
csv_data = csv.reader(content)
body = list(csv_data)
self.assertEqual(len(body), 24)
self.assertEqual(body[0][0], "Created Date")
self.assertEqual(body[1][1], "advertising")
self.assertEqual(body[-1][1], "hello world")
|
TestSearchAnalyticsView
|
python
|
django__django
|
tests/fixtures_model_package/tests.py
|
{
"start": 654,
"end": 2148
}
|
class ____(TestCase):
def test_loaddata(self):
"Fixtures can load data into models defined in packages"
# Load fixture 1. Single JSON file, with two objects
management.call_command("loaddata", "model_package_fixture1.json", verbosity=0)
self.assertQuerySetEqual(
Article.objects.all(),
[
"Time to reform copyright",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load fixture 2. JSON file imported by default. Overwrites some
# existing objects
management.call_command("loaddata", "model_package_fixture2.json", verbosity=0)
self.assertQuerySetEqual(
Article.objects.all(),
[
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load a fixture that doesn't exist
with self.assertRaisesMessage(
CommandError, "No fixture named 'unknown' found."
):
management.call_command("loaddata", "unknown.json", verbosity=0)
self.assertQuerySetEqual(
Article.objects.all(),
[
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
|
FixtureTestCase
|
python
|
scikit-learn__scikit-learn
|
sklearn/tests/metadata_routing_common.py
|
{
"start": 11436,
"end": 11833
}
|
class ____(ConsumingClassifier):
"""ConsumingClassifier without a predict_log_proba method, but with predict_proba.
Used to mimic dynamic method selection such as in
`BaggingClassifier.predict_log_proba()`.
"""
@property
def predict_log_proba(self):
raise AttributeError("This estimator does not support predict_log_proba")
|
ConsumingClassifierWithoutPredictLogProba
|
python
|
tensorflow__tensorflow
|
tensorflow/python/compiler/tensorrt/test/topk_test.py
|
{
"start": 1066,
"end": 1778
}
|
class ____(trt_test.TfTrtIntegrationTestBase):
"""Testing Top-K in TF-TRT conversion."""
def GraphFn(self, x):
k = 5
k_tensor = constant_op.constant(k, dtype=dtypes.int32, name="Const")
values, indices = nn_ops.top_k(x, k_tensor, name="TopK")
values = array_ops.identity(values, name="output_0")
indices = array_ops.identity(indices, name="output_1")
return values, indices
def GetParams(self):
k = 5
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 100]],
[[100, k], [100, k]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_000": ["Const", "TopK"]}
|
TopKTest
|
python
|
facebook__pyre-check
|
client/language_server/protocol.py
|
{
"start": 9460,
"end": 9729
}
|
class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
synchronization: Optional[TextDocumentSyncClientCapabilities] = None
publish_diagnostics: Optional[PublishDiagnosticsClientCapabilities] = None
@dataclasses.dataclass(frozen=True)
|
TextDocumentClientCapabilities
|
python
|
pytorch__pytorch
|
.github/scripts/filter_test_configs.py
|
{
"start": 1625,
"end": 23203
}
|
class ____(Enum):
DISABLED = "disabled"
UNSTABLE = "unstable"
def parse_args() -> Any:
from argparse import ArgumentParser
parser = ArgumentParser(
"Filter all test configurations and keep only requested ones"
)
parser.add_argument(
"--test-matrix", type=str, required=True, help="the original test matrix"
)
parser.add_argument(
"--selected-test-configs",
type=str,
default="",
help="a comma-separated list of test configurations from the test matrix to keep",
)
parser.add_argument(
"--workflow", type=str, help="the name of the current workflow, i.e. pull"
)
parser.add_argument(
"--job-name",
type=str,
help="the name of the current job, i.e. linux-jammy-py3.8-gcc7 / build",
)
parser.add_argument("--pr-number", type=str, help="the pull request number")
parser.add_argument("--tag", type=str, help="the associated tag if it exists")
parser.add_argument(
"--event-name",
type=str,
help="name of the event that triggered the job (pull, schedule, etc)",
)
parser.add_argument(
"--schedule",
type=str,
help="cron schedule that triggered the job",
)
parser.add_argument(
"--branch",
type=str,
default=MAIN_BRANCH,
help="the branch name",
)
return parser.parse_args()
@cache
def get_pr_info(pr_number: int) -> dict[str, Any]:
"""
Dynamically get PR information
"""
# From https://docs.github.com/en/actions/learn-github-actions/environment-variables
pytorch_repo = os.environ.get("GITHUB_REPOSITORY", "pytorch/pytorch")
pytorch_github_api = f"https://api.github.com/repos/{pytorch_repo}"
github_token = os.environ["GITHUB_TOKEN"]
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {github_token}",
}
json_response: dict[str, Any] = download_json(
url=f"{pytorch_github_api}/issues/{pr_number}",
headers=headers,
)
if not json_response:
warnings.warn(f"Failed to get the labels for #{pr_number}")
return {}
return json_response
def get_labels(pr_number: int) -> set[str]:
"""
Dynamically get the latest list of labels from the pull request
"""
pr_info = get_pr_info(pr_number)
return {
label.get("name") for label in pr_info.get("labels", []) if label.get("name")
}
def filter_labels(labels: set[str], label_regex: Any) -> set[str]:
"""
Return the list of matching labels
"""
return {l for l in labels if re.match(label_regex, l)}
def filter(test_matrix: dict[str, list[Any]], labels: set[str]) -> dict[str, list[Any]]:
"""
Select the list of test config to run from the test matrix. The logic works
as follows:
If the PR has one or more test-config labels as specified, only these test configs
will be selected. This also works with ciflow labels, for example, if a PR has both
ciflow/trunk and test-config/functorch, only trunk functorch builds and tests will
be run.
If the PR has none of the test-config label, all tests are run as usual.
"""
filtered_test_matrix: dict[str, list[Any]] = {"include": []}
for entry in test_matrix.get("include", []):
config_name = entry.get("config", "")
if not config_name:
continue
label = f"{PREFIX}{config_name.strip()}"
if label in labels:
msg = f"Select {config_name} because label {label} is present in the pull request by the time the test starts"
info(msg)
filtered_test_matrix["include"].append(entry)
test_config_labels = filter_labels(labels, re.compile(f"{PREFIX}.+"))
if not filtered_test_matrix["include"] and not test_config_labels:
info("Found no test-config label on the PR, so all test configs are included")
# Found no test-config label and the filtered test matrix is empty, return the same
# test matrix as before so that all tests can be run normally
return test_matrix
else:
msg = f"Found {test_config_labels} on the PR so only these test configs are run"
info(msg)
# When the filter test matrix contain matches or if a valid test config label
# is found in the PR, return the filtered test matrix
return filtered_test_matrix
def filter_selected_test_configs(
test_matrix: dict[str, list[Any]], selected_test_configs: set[str]
) -> dict[str, list[Any]]:
"""
Keep only the selected configs if the list if not empty. Otherwise, keep all test configs.
This filter is used when the workflow is dispatched manually.
"""
if not selected_test_configs:
return test_matrix
filtered_test_matrix: dict[str, list[Any]] = {"include": []}
for entry in test_matrix.get("include", []):
config_name = entry.get("config", "")
if not config_name:
continue
if config_name in selected_test_configs:
filtered_test_matrix["include"].append(entry)
return filtered_test_matrix
def set_periodic_modes(
test_matrix: dict[str, list[Any]], job_name: Optional[str]
) -> dict[str, list[Any]]:
"""
Apply all periodic modes when running under a schedule
"""
scheduled_test_matrix: dict[str, list[Any]] = {
"include": [],
}
for config in test_matrix.get("include", []):
for mode, cond in SUPPORTED_PERIODICAL_MODES.items():
if not cond(job_name):
continue
cfg = config.copy()
cfg[mode] = mode
scheduled_test_matrix["include"].append(cfg)
return scheduled_test_matrix
def mark_unstable_jobs(
workflow: str, job_name: str, test_matrix: dict[str, list[Any]]
) -> dict[str, list[Any]]:
"""
Check the list of unstable jobs and mark them accordingly. Note that if a job
is unstable, all its dependents will also be marked accordingly
"""
return process_jobs(
workflow=workflow,
job_name=job_name,
test_matrix=test_matrix,
issue_type=IssueType.UNSTABLE,
url=UNSTABLE_JOBS_URL,
)
def remove_disabled_jobs(
workflow: str, job_name: str, test_matrix: dict[str, list[Any]]
) -> dict[str, list[Any]]:
"""
Check the list of disabled jobs, remove the current job and all its dependents
if it exists in the list
"""
return process_jobs(
workflow=workflow,
job_name=job_name,
test_matrix=test_matrix,
issue_type=IssueType.DISABLED,
url=DISABLED_JOBS_URL,
)
def _filter_jobs(
test_matrix: dict[str, list[Any]],
issue_type: IssueType,
target_cfg: Optional[str] = None,
) -> dict[str, list[Any]]:
"""
An utility function used to actually apply the job filter
"""
# The result will be stored here
filtered_test_matrix: dict[str, list[Any]] = {"include": []}
# This is an issue to disable a CI job
if issue_type == IssueType.DISABLED:
# If there is a target config, disable (remove) only that
if target_cfg:
# Remove the target config from the test matrix
filtered_test_matrix["include"] = [
r for r in test_matrix["include"] if r.get("config", "") != target_cfg
]
return filtered_test_matrix
if issue_type == IssueType.UNSTABLE:
for r in test_matrix["include"]:
cpy = r.copy()
if (target_cfg and r.get("config", "") == target_cfg) or not target_cfg:
# If there is a target config, only mark that as unstable, otherwise,
# mark everything as unstable
cpy[IssueType.UNSTABLE.value] = IssueType.UNSTABLE.value
filtered_test_matrix["include"].append(cpy)
return filtered_test_matrix
# No matching issue, return everything
return test_matrix
def process_jobs(
workflow: str,
job_name: str,
test_matrix: dict[str, list[Any]],
issue_type: IssueType,
url: str,
) -> dict[str, list[Any]]:
"""
Both disabled and unstable jobs are in the following format:
{
"WORKFLOW / PLATFORM / JOB (CONFIG)": [
AUTHOR,
ISSUE_NUMBER,
ISSUE_URL,
WORKFLOW,
PLATFORM,
JOB (CONFIG),
],
"pull / linux-bionic-py3.8-clang9 / test (dynamo)": [
"pytorchbot",
"94861",
"https://github.com/pytorch/pytorch/issues/94861",
"pull",
"linux-bionic-py3.8-clang9",
"test (dynamo)",
],
}
"""
try:
# The job name from github is in the PLATFORM / JOB (CONFIG) format, so breaking
# it into its two components first
current_platform, _ = (n.strip() for n in job_name.split(JOB_NAME_SEP, 1) if n)
except ValueError:
warnings.warn(f"Invalid job name {job_name}, returning")
return test_matrix
for record in download_json(url=url, headers={}).values():
(
author,
_,
target_url,
target_workflow,
target_platform,
target_job_cfg,
) = record
if target_workflow != workflow:
# The current workflow doesn't match this record
continue
cleanup_regex = rf"(-{BUILD_JOB_NAME}|-{TEST_JOB_NAME})$"
# There is an exception here for binary build workflows in which the platform
# names have the build and test suffix. For example, we have a build job called
# manywheel-py3-cuda11_8-build / build and its subsequent test job called
# manywheel-py3-cuda11_8-test / test. So they are linked, but their suffixes
# are different
target_platform_no_suffix = re.sub(cleanup_regex, "", target_platform)
current_platform_no_suffix = re.sub(cleanup_regex, "", current_platform)
if (
target_platform != current_platform
and target_platform_no_suffix != current_platform_no_suffix
):
# The current platform doesn't match this record
continue
# The logic after this is fairly complicated:
#
# - If the target record doesn't have the optional job (config) name,
# i.e. pull / linux-bionic-py3.8-clang9, all build and test jobs will
# be skipped if it's a disabled record or marked as unstable if it's
# an unstable record
#
# - If the target record has the job name and it's a build job, i.e.
# pull / linux-bionic-py3.8-clang9 / build, all build and test jobs
# will be skipped if it's a disabled record or marked as unstable if
# it's an unstable record, because the latter requires the former
#
# - If the target record has the job name and it's a test job without
# the config part, i.e. pull / linux-bionic-py3.8-clang9 / test, all
# test jobs will be skipped if it's a disabled record or marked as
# unstable if it's an unstable record
#
# - If the target record has the job (config) name, only that test config
# will be skipped or marked as unstable
if not target_job_cfg:
msg = (
f"Issue {target_url} created by {author} has {issue_type.value} "
+ f"all CI jobs for {workflow} / {job_name}"
)
info(msg)
return _filter_jobs(
test_matrix=test_matrix,
issue_type=issue_type,
)
if target_job_cfg == BUILD_JOB_NAME:
msg = (
f"Issue {target_url} created by {author} has {issue_type.value} "
+ f"the build job for {workflow} / {job_name}"
)
info(msg)
return _filter_jobs(
test_matrix=test_matrix,
issue_type=issue_type,
)
if target_job_cfg in (TEST_JOB_NAME, BUILD_AND_TEST_JOB_NAME):
msg = (
f"Issue {target_url} created by {author} has {issue_type.value} "
+ f"all the test jobs for {workflow} / {job_name}"
)
info(msg)
return _filter_jobs(
test_matrix=test_matrix,
issue_type=issue_type,
)
m = JOB_NAME_CFG_REGEX.match(target_job_cfg)
if m:
target_job = m.group("job")
# Make sure that the job name is a valid test job name first before checking the config
if target_job in (TEST_JOB_NAME, BUILD_AND_TEST_JOB_NAME):
target_cfg = m.group("cfg")
# NB: There can be multiple unstable configurations, i.e. inductor, inductor_huggingface
test_matrix = _filter_jobs(
test_matrix=test_matrix,
issue_type=issue_type,
target_cfg=target_cfg,
)
else:
warnings.warn(
f"Found a matching {issue_type.value} issue {target_url} for {workflow} / {job_name}, "
+ f"but the name {target_job_cfg} is invalid"
)
# Found no matching target, return the same input test matrix
return test_matrix
def download_json(url: str, headers: dict[str, str], num_retries: int = 3) -> Any:
for _ in range(num_retries):
try:
req = Request(url=url, headers=headers)
content = urlopen(req, timeout=5).read().decode("utf-8")
return json.loads(content)
except Exception as e:
warnings.warn(f"Could not download {url}: {e}")
warnings.warn(f"All {num_retries} retries exhausted, downloading {url} failed")
return {}
def set_output(name: str, val: Any) -> None:
print(f"Setting output {name}={val}")
if os.getenv("GITHUB_OUTPUT"):
with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
print(f"{name}={val}", file=env)
else:
print(f"::set-output name={name}::{val}")
def parse_reenabled_issues(s: Optional[str]) -> list[str]:
# NB: When the PR body is empty, GitHub API returns a None value, which is
# passed into this function
if not s:
return []
# The regex is meant to match all *case-insensitive* keywords that
# GitHub has delineated would link PRs to issues, more details here:
# https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue.
# E.g., "Close #62851", "fixES #62851" and "RESOLVED #62851" would all match, but not
# "closes #62851" --> extra space, "fixing #62851" --> not a keyword, nor "fix 62851" --> no #
issue_numbers = [x[5] for x in re.findall(REENABLE_TEST_REGEX, s)]
return issue_numbers
def get_reenabled_issues(pr_body: str = "") -> list[str]:
default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
try:
commit_messages = subprocess.check_output(
f"git cherry -v {default_branch}".split(" ")
).decode("utf-8")
except Exception as e:
warnings.warn(f"failed to get commit messages: {e}")
commit_messages = ""
return parse_reenabled_issues(pr_body) + parse_reenabled_issues(commit_messages)
def check_for_setting(labels: set[str], body: str, setting: str) -> bool:
return setting in labels or f"[{setting}]" in body
def perform_misc_tasks(
labels: set[str],
test_matrix: dict[str, list[Any]],
job_name: str,
pr_body: str,
branch: Optional[str] = None,
tag: Optional[str] = None,
) -> None:
"""
In addition to apply the filter logic, the script also does the following
misc tasks to set keep-going and is-unstable variables
"""
set_output(
"keep-going",
branch == MAIN_BRANCH
or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag))
# Pattern for tags created via manual run on HUD
or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag))
or check_for_setting(labels, pr_body, "keep-going"),
)
set_output(
"ci-verbose-test-logs",
check_for_setting(labels, pr_body, "ci-verbose-test-logs"),
)
set_output(
"ci-test-showlocals", check_for_setting(labels, pr_body, "ci-test-showlocals")
)
set_output(
"ci-no-test-timeout", check_for_setting(labels, pr_body, "ci-no-test-timeout")
)
set_output("ci-no-td", check_for_setting(labels, pr_body, "ci-no-td"))
# Only relevant for the one linux distributed cuda job, delete this when TD
# is rolled out completely
set_output(
"ci-td-distributed", check_for_setting(labels, pr_body, "ci-td-distributed")
)
# Obviously, if the job name includes unstable, then this is an unstable job
is_unstable = job_name and IssueType.UNSTABLE.value in job_name
if not is_unstable and test_matrix and test_matrix.get("include"):
# Even when the job name doesn't mention unstable, we will also mark it as
# unstable when the test matrix only includes unstable jobs. Basically, this
# logic allows build or build-and-test jobs to be marked as unstable too.
#
# Basically, when a build job is unstable, all the subsequent test jobs are
# also unstable. And when all test jobs are unstable, we will also treat the
# build job as unstable. It's simpler this way
is_unstable = all(IssueType.UNSTABLE.value in r for r in test_matrix["include"])
set_output(
"is-unstable",
is_unstable,
)
set_output("reenabled-issues", ",".join(get_reenabled_issues(pr_body=pr_body)))
if MEM_LEAK_LABEL in labels:
# Enable mem leak check if label is added
for config in test_matrix.get("include", []):
if is_cuda_or_rocm_job(job_name):
config["mem_leak_check"] = "mem_leak_check"
def main() -> None:
args = parse_args()
# Load the original test matrix set by the workflow. Its format, however,
# doesn't follow the strict JSON format, so we load it using yaml here for
# its more relaxed syntax
test_matrix = yaml.safe_load(args.test_matrix)
if test_matrix is None:
warnings.warn(f"Invalid test matrix input '{args.test_matrix}', exiting")
# We handle invalid test matrix gracefully by marking it as empty
set_output("is-test-matrix-empty", True)
sys.exit(0)
pr_number = args.pr_number
tag = args.tag
# If the tag matches, we can get the PR number from it, this is from ciflow
# workflow dispatcher
tag_regex = re.compile(r"^ciflow/[\w\-]+/(?P<pr_number>\d+)$")
labels = set()
if pr_number:
# If a PR number is set, query all the labels from that PR
labels = get_labels(int(pr_number))
# Then filter the test matrix and keep only the selected ones
filtered_test_matrix = filter(test_matrix, labels)
elif tag:
m = tag_regex.match(tag)
if m:
pr_number = m.group("pr_number")
# The PR number can also come from the tag in ciflow tag event
labels = get_labels(int(pr_number))
# Filter the test matrix and keep only the selected ones
filtered_test_matrix = filter(test_matrix, labels)
else:
# There is a tag but it isn't ciflow, so there is nothing left to do
filtered_test_matrix = test_matrix
else:
# No PR number, no tag, we can just return the test matrix as it is
filtered_test_matrix = test_matrix
if args.selected_test_configs:
selected_test_configs = {
v.strip().lower()
for v in args.selected_test_configs.split(",")
if v.strip()
}
filtered_test_matrix = filter_selected_test_configs(
filtered_test_matrix, selected_test_configs
)
if args.event_name == "schedule" and args.schedule == "29 8 * * *":
# we don't want to run the mem leak check or disabled tests on normal
# periodically scheduled jobs, only the ones at this time
filtered_test_matrix = set_periodic_modes(filtered_test_matrix, args.job_name)
if args.workflow and args.job_name and args.branch not in EXCLUDED_BRANCHES:
# If both workflow and job name are available, we will check if the current job
# is disabled and remove it and all its dependants from the test matrix
filtered_test_matrix = remove_disabled_jobs(
args.workflow, args.job_name, filtered_test_matrix
)
filtered_test_matrix = mark_unstable_jobs(
args.workflow, args.job_name, filtered_test_matrix
)
pr_body = get_pr_info(int(pr_number)).get("body", "") if pr_number else ""
perform_misc_tasks(
labels=labels,
test_matrix=filtered_test_matrix,
job_name=args.job_name,
pr_body=pr_body if pr_body else "",
branch=args.branch,
tag=tag,
)
# Set the filtered test matrix as the output
set_output("test-matrix", json.dumps(filtered_test_matrix))
filtered_test_matrix_len = len(filtered_test_matrix.get("include", []))
# and also put a flag if the test matrix is empty, so subsequent jobs can
# quickly check it without the need to parse the JSON string
set_output("is-test-matrix-empty", filtered_test_matrix_len == 0)
if __name__ == "__main__":
main()
|
IssueType
|
python
|
walkccc__LeetCode
|
solutions/3341. Find Minimum Time to Reach Last Room I/3341.py
|
{
"start": 0,
"end": 971
}
|
class ____:
def minTimeToReach(self, moveTime: list[list[int]]) -> int:
return self._dijkstra(moveTime,
(0, 0), (len(moveTime) - 1, len(moveTime[0]) - 1))
def _dijkstra(
self,
moveTime: list[list[int]],
src: tuple[int, int],
dst: tuple[int, int]
) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(moveTime)
n = len(moveTime[0])
dist = [[math.inf] * n for _ in range(m)]
dist[0][0] = 0
minHeap = [(0, src)] # (d, u)
while minHeap:
d, u = heapq.heappop(minHeap)
if u == dst:
return d
i, j = u
if d > dist[i][j]:
continue
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
newDist = max(moveTime[x][y], d) + 1
if newDist < dist[x][y]:
dist[x][y] = newDist
heapq.heappush(minHeap, (newDist, (x, y)))
return -1
|
Solution
|
python
|
pypa__pip
|
src/pip/_internal/exceptions.py
|
{
"start": 9504,
"end": 9614
}
|
class ____(PipError):
"""Raised when there's a previous conflicting build directory"""
|
PreviousBuildDirError
|
python
|
tornadoweb__tornado
|
tornado/test/httpclient_test.py
|
{
"start": 29644,
"end": 30791
}
|
class ____(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/", user_agent="foo"), dict()
)
self.assertEqual(proxy.user_agent, "foo")
def test_default_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/"), dict(network_interface="foo")
)
self.assertEqual(proxy.network_interface, "foo")
def test_both_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/", proxy_host="foo"), dict(proxy_host="bar")
)
self.assertEqual(proxy.proxy_host, "foo")
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
self.assertIsNone(proxy.auth_username)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), None)
self.assertIsNone(proxy.auth_username)
|
RequestProxyTest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_textbox03.py
|
{
"start": 315,
"end": 997
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.insert_textbox("E9", "This is some text")
worksheet1.insert_textbox("H18", "Some more text")
worksheet2.insert_textbox("C4", "Hello")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
networkx__networkx
|
networkx/generators/degree_seq.py
|
{
"start": 25991,
"end": 30491
}
|
class ____:
# class to generate random graphs with a given degree sequence
# use random_degree_sequence_graph()
def __init__(self, degree, rng):
self.rng = rng
self.degree = list(degree)
if not nx.is_graphical(self.degree):
raise nx.NetworkXUnfeasible("degree sequence is not graphical")
# node labels are integers 0,...,n-1
self.m = sum(self.degree) / 2.0 # number of edges
try:
self.dmax = max(self.degree) # maximum degree
except ValueError:
self.dmax = 0
def generate(self):
# remaining_degree is mapping from int->remaining degree
self.remaining_degree = dict(enumerate(self.degree))
# add all nodes to make sure we get isolated nodes
self.graph = nx.Graph()
self.graph.add_nodes_from(self.remaining_degree)
# remove zero degree nodes
for n, d in list(self.remaining_degree.items()):
if d == 0:
del self.remaining_degree[n]
if len(self.remaining_degree) > 0:
# build graph in three phases according to how many unmatched edges
self.phase1()
self.phase2()
self.phase3()
return self.graph
def update_remaining(self, u, v, aux_graph=None):
# decrement remaining nodes, modify auxiliary graph if in phase3
if aux_graph is not None:
# remove edges from auxiliary graph
aux_graph.remove_edge(u, v)
if self.remaining_degree[u] == 1:
del self.remaining_degree[u]
if aux_graph is not None:
aux_graph.remove_node(u)
else:
self.remaining_degree[u] -= 1
if self.remaining_degree[v] == 1:
del self.remaining_degree[v]
if aux_graph is not None:
aux_graph.remove_node(v)
else:
self.remaining_degree[v] -= 1
def p(self, u, v):
# degree probability
return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m)
def q(self, u, v):
# remaining degree probability
norm = max(self.remaining_degree.values()) ** 2
return self.remaining_degree[u] * self.remaining_degree[v] / norm
def suitable_edge(self):
"""Returns True if and only if an arbitrary remaining node can
potentially be joined with some other remaining node.
"""
nodes = iter(self.remaining_degree)
u = next(nodes)
return any(v not in self.graph[u] for v in nodes)
def phase1(self):
# choose node pairs from (degree) weighted distribution
rem_deg = self.remaining_degree
while sum(rem_deg.values()) >= 2 * self.dmax**2:
u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng))
if self.graph.has_edge(u, v):
continue
if self.rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v)
def phase2(self):
# choose remaining nodes uniformly at random and use rejection sampling
remaining_deg = self.remaining_degree
rng = self.rng
while len(remaining_deg) >= 2 * self.dmax:
while True:
u, v = sorted(rng.sample(list(remaining_deg.keys()), 2))
if self.graph.has_edge(u, v):
continue
if rng.random() < self.q(u, v):
break
if rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v)
def phase3(self):
# build potential remaining edges and choose with rejection sampling
potential_edges = combinations(self.remaining_degree, 2)
# build auxiliary graph of potential edges not already in graph
H = nx.Graph(
[(u, v) for (u, v) in potential_edges if not self.graph.has_edge(u, v)]
)
rng = self.rng
while self.remaining_degree:
if not self.suitable_edge():
raise nx.NetworkXUnfeasible("no suitable edges left")
while True:
u, v = sorted(rng.choice(list(H.edges())))
if rng.random() < self.q(u, v):
break
if rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v, aux_graph=H)
|
DegreeSequenceRandomGraph
|
python
|
getsentry__sentry
|
src/sentry/attachments/default.py
|
{
"start": 80,
"end": 217
}
|
class ____(BaseAttachmentCache):
def __init__(self, **options):
super().__init__(default_cache, **options)
|
DefaultAttachmentCache
|
python
|
openai__openai-python
|
src/openai/types/beta/thread.py
|
{
"start": 332,
"end": 633
}
|
class ____(BaseModel):
file_ids: Optional[List[str]] = None
"""
A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
available to the `code_interpreter` tool. There can be a maximum of 20 files
associated with the tool.
"""
|
ToolResourcesCodeInterpreter
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_metadata.py
|
{
"start": 102012,
"end": 114083
}
|
class ____(testing.AssertsCompiledSQL, fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30)),
)
@testing.fixture
def existing_meta(self):
meta2 = MetaData()
Table("users", meta2, autoload_with=testing.db)
return meta2
@testing.fixture
def empty_meta(self):
return MetaData()
@testing.variation(
"scenario",
[
"inplace",
"inplace_ee",
"separate_ee_key_first",
"separate_ee_key_second",
"separate_ee_key_append_no_replace",
"separate_ee_key_append_replace",
],
)
@testing.variation("both_have_keys", [True, False])
def test_table_w_two_same_named_columns(
self, empty_meta, scenario: Variation, both_have_keys: Variation
):
if scenario.inplace:
with expect_raises_message(
exc.DuplicateColumnError,
"A column with name 'b' is already present in table 'users'.",
):
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b1" if both_have_keys else None),
Column("b", String, key="b2"),
)
return
elif scenario.inplace_ee:
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b1" if both_have_keys else None),
Column("b", String, key="b2"),
extend_existing=True,
)
elif scenario.separate_ee_key_first:
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b2"),
)
expected_warnings = (
[
'Column with user-specified key "b2" is being '
'replaced with plain named column "b", key "b2" '
"is being removed."
]
if not both_have_keys
else []
)
with expect_warnings(*expected_warnings):
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b1" if both_have_keys else None),
extend_existing=True,
)
elif scenario.separate_ee_key_second:
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b1" if both_have_keys else None),
)
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b2"),
extend_existing=True,
)
elif scenario.separate_ee_key_append_no_replace:
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b1" if both_have_keys else None),
)
with expect_raises_message(
exc.DuplicateColumnError,
r"A column with name 'b' is already present in table 'users'. "
r"Specify replace_existing=True to Table.append_column\(\) ",
):
t1.append_column(Column("b", String, key="b2"))
return
elif scenario.separate_ee_key_append_replace:
t1 = Table(
"users",
empty_meta,
Column("a", String),
Column("b", String, key="b1" if both_have_keys else None),
)
t1.append_column(
Column("b", String, key="b2"), replace_existing=True
)
else:
scenario.fail()
if scenario.separate_ee_key_first:
if both_have_keys:
eq_(t1.c.keys(), ["a", "b1"])
else:
eq_(t1.c.keys(), ["a", "b"])
else:
eq_(t1.c.keys(), ["a", "b2"])
self.assert_compile(select(t1), "SELECT users.a, users.b FROM users")
def test_exception_no_flags(self, existing_meta):
def go():
Table(
"users",
existing_meta,
Column("name", Unicode),
autoload_with=testing.db,
)
assert_raises_message(
exc.InvalidRequestError,
"Table 'users' is already defined for this MetaData instance.",
go,
)
def test_keep_plus_existing_raises(self, existing_meta):
assert_raises(
exc.ArgumentError,
Table,
"users",
existing_meta,
keep_existing=True,
extend_existing=True,
)
def test_keep_existing_no_dupe_constraints(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("id", Integer),
Column("name", Unicode),
UniqueConstraint("name"),
keep_existing=True,
)
assert "name" in users.c
assert "id" in users.c
eq_(len(users.constraints), 2)
u2 = Table(
"users",
empty_meta,
Column("id", Integer),
Column("name", Unicode),
UniqueConstraint("name"),
keep_existing=True,
)
eq_(len(u2.constraints), 2)
def test_extend_existing_dupes_constraints(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("id", Integer),
Column("name", Unicode),
UniqueConstraint("name"),
extend_existing=True,
)
assert "name" in users.c
assert "id" in users.c
eq_(len(users.constraints), 2)
u2 = Table(
"users",
empty_meta,
Column("id", Integer),
Column("name", Unicode),
UniqueConstraint("name"),
extend_existing=True,
)
# constraint got duped
eq_(len(u2.constraints), 3)
def test_autoload_replace_column(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("name", Unicode),
autoload_with=testing.db,
)
assert isinstance(users.c.name.type, Unicode)
def test_keep_existing_coltype(self, existing_meta):
users = Table(
"users",
existing_meta,
Column("name", Unicode),
autoload_with=testing.db,
keep_existing=True,
)
assert not isinstance(users.c.name.type, Unicode)
def test_keep_existing_quote(self, existing_meta):
users = Table(
"users",
existing_meta,
quote=True,
autoload_with=testing.db,
keep_existing=True,
)
assert not users.name.quote
def test_keep_existing_add_column(self, existing_meta):
users = Table(
"users",
existing_meta,
Column("foo", Integer),
autoload_with=testing.db,
keep_existing=True,
)
assert "foo" not in users.c
def test_keep_existing_coltype_no_orig(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("name", Unicode),
autoload_with=testing.db,
keep_existing=True,
)
assert isinstance(users.c.name.type, Unicode)
@testing.skip_if(
lambda: testing.db.dialect.requires_name_normalize,
"test depends on lowercase as case insensitive",
)
def test_keep_existing_quote_no_orig(self, empty_meta):
users = Table(
"users",
empty_meta,
quote=True,
autoload_with=testing.db,
keep_existing=True,
)
assert users.name.quote
def test_keep_existing_add_column_no_orig(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("foo", Integer),
autoload_with=testing.db,
keep_existing=True,
)
assert "foo" in users.c
def test_keep_existing_coltype_no_reflection(self, existing_meta):
users = Table(
"users", existing_meta, Column("name", Unicode), keep_existing=True
)
assert not isinstance(users.c.name.type, Unicode)
def test_keep_existing_quote_no_reflection(self, existing_meta):
users = Table("users", existing_meta, quote=True, keep_existing=True)
assert not users.name.quote
def test_keep_existing_add_column_no_reflection(self, existing_meta):
users = Table(
"users", existing_meta, Column("foo", Integer), keep_existing=True
)
assert "foo" not in users.c
def test_extend_existing_coltype(self, existing_meta):
users = Table(
"users",
existing_meta,
Column("name", Unicode),
autoload_with=testing.db,
extend_existing=True,
)
assert isinstance(users.c.name.type, Unicode)
def test_extend_existing_quote(self, existing_meta):
assert_raises_message(
tsa.exc.ArgumentError,
"Can't redefine 'quote' or 'quote_schema' arguments",
Table,
"users",
existing_meta,
quote=True,
autoload_with=testing.db,
extend_existing=True,
)
def test_extend_existing_add_column(self, existing_meta):
users = Table(
"users",
existing_meta,
Column("foo", Integer),
autoload_with=testing.db,
extend_existing=True,
)
assert "foo" in users.c
def test_extend_existing_coltype_no_orig(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("name", Unicode),
autoload_with=testing.db,
extend_existing=True,
)
assert isinstance(users.c.name.type, Unicode)
@testing.skip_if(
lambda: testing.db.dialect.requires_name_normalize,
"test depends on lowercase as case insensitive",
)
def test_extend_existing_quote_no_orig(self, empty_meta):
users = Table(
"users",
empty_meta,
quote=True,
autoload_with=testing.db,
extend_existing=True,
)
assert users.name.quote
def test_extend_existing_add_column_no_orig(self, empty_meta):
users = Table(
"users",
empty_meta,
Column("foo", Integer),
autoload_with=testing.db,
extend_existing=True,
)
assert "foo" in users.c
def test_extend_existing_coltype_no_reflection(self, existing_meta):
users = Table(
"users",
existing_meta,
Column("name", Unicode),
extend_existing=True,
)
assert isinstance(users.c.name.type, Unicode)
def test_extend_existing_quote_no_reflection(self, existing_meta):
assert_raises_message(
tsa.exc.ArgumentError,
"Can't redefine 'quote' or 'quote_schema' arguments",
Table,
"users",
existing_meta,
quote=True,
extend_existing=True,
)
def test_extend_existing_add_column_no_reflection(self, existing_meta):
users = Table(
"users",
existing_meta,
Column("foo", Integer),
extend_existing=True,
)
assert "foo" in users.c
|
UseExistingTest
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_token_revocation.py
|
{
"start": 1239,
"end": 7586
}
|
class ____(BaseTest):
def test_revoke_access_token(self):
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
data = {
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
"token": tok.token,
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"")
self.assertFalse(AccessToken.objects.filter(pk=tok.pk).exists())
def test_revoke_access_token_public(self):
public_app = Application(
name="Test Application",
redirect_uris="http://localhost http://example.com http://example.org",
user=self.dev_user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
public_app.save()
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=public_app,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
data = {
"client_id": public_app.client_id,
"token": tok.token,
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
def test_revoke_access_token_with_hint(self):
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
data = {
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
"token": tok.token,
"token_type_hint": "access_token",
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertFalse(AccessToken.objects.filter(pk=tok.pk).exists())
def test_revoke_access_token_with_invalid_hint(self):
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
# invalid hint should have no effect
data = {
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
"token": tok.token,
"token_type_hint": "bad_hint",
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertFalse(AccessToken.objects.filter(pk=tok.pk).exists())
def test_revoke_refresh_token(self):
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
rtok = RefreshToken.objects.create(
user=self.test_user, token="999999999", application=self.application, access_token=tok
)
data = {
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
"token": rtok.token,
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
refresh_token = RefreshToken.objects.filter(pk=rtok.pk).first()
self.assertIsNotNone(refresh_token.revoked)
self.assertFalse(AccessToken.objects.filter(pk=rtok.access_token.pk).exists())
def test_revoke_refresh_token_with_revoked_access_token(self):
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
rtok = RefreshToken.objects.create(
user=self.test_user, token="999999999", application=self.application, access_token=tok
)
for token in (tok.token, rtok.token):
data = {
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
"token": token,
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertFalse(AccessToken.objects.filter(pk=tok.pk).exists())
refresh_token = RefreshToken.objects.filter(pk=rtok.pk).first()
self.assertIsNotNone(refresh_token.revoked)
def test_revoke_token_with_wrong_hint(self):
"""
From the revocation rfc, `Section 4.1.2`_ :
If the server is unable to locate the token using the given hint,
it MUST extend its search across all of its supported token types
.. _`Section 4.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
"""
tok = AccessToken.objects.create(
user=self.test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
data = {
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
"token": tok.token,
"token_type_hint": "refresh_token",
}
url = reverse("oauth2_provider:revoke-token")
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertFalse(AccessToken.objects.filter(pk=tok.pk).exists())
|
TestRevocationView
|
python
|
euske__pdfminer
|
pdfminer/pdfdocument.py
|
{
"start": 8398,
"end": 12552
}
|
class ____:
PASSWORD_PADDING = (b'(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08'
b'..\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz')
supported_revisions = (2, 3)
def __init__(self, docid, param, password=b''):
self.docid = docid
self.param = param
self.password = password
self.init()
return
def init(self):
self.init_params()
if self.r not in self.supported_revisions:
raise PDFEncryptionError('Unsupported revision: param=%r' % self.param)
self.init_key()
return
def init_params(self):
self.v = int_value(self.param.get('V', 0))
self.r = int_value(self.param['R'])
self.p = int_value(self.param['P'])
self.o = bytes_value(self.param['O'])
self.u = bytes_value(self.param['U'])
self.length = int_value(self.param.get('Length', 40))
return
def init_key(self):
self.key = self.authenticate(self.password)
if self.key is None:
raise PDFPasswordIncorrect
return
def is_printable(self):
return bool(self.p & 4)
def is_modifiable(self):
return bool(self.p & 8)
def is_extractable(self):
return bool(self.p & 16)
def compute_u(self, key):
if self.r == 2:
# Algorithm 3.4
return ARC4.new(key).encrypt(self.PASSWORD_PADDING) # 2
else:
# Algorithm 3.5
hash = md5.md5(self.PASSWORD_PADDING) # 2
hash.update(self.docid[0]) # 3
result = ARC4.new(key).encrypt(hash.digest()) # 4
for i in range(1, 20): # 5
k = bytes( (c ^ i) for c in key )
result = ARC4.new(k).encrypt(result)
result += result # 6
return result
def compute_encryption_key(self, password):
# Algorithm 3.2
password = (password + self.PASSWORD_PADDING)[:32] # 1
hash = md5.md5(password) # 2
hash.update(self.o) # 3
hash.update(struct.pack('<l', self.p)) # 4
hash.update(self.docid[0]) # 5
if self.r >= 4:
if not self.encrypt_metadata:
hash.update(b'\xff\xff\xff\xff')
result = hash.digest()
n = 5
if self.r >= 3:
n = self.length // 8
for _ in range(50):
result = md5.md5(result[:n]).digest()
return result[:n]
def authenticate(self, password):
key = self.authenticate_user_password(password)
if key is None:
key = self.authenticate_owner_password(password)
return key
def authenticate_user_password(self, password):
key = self.compute_encryption_key(password)
if self.verify_encryption_key(key):
return key
else:
return None
def verify_encryption_key(self, key):
# Algorithm 3.6
u = self.compute_u(key)
if self.r == 2:
return u == self.u
return u[:16] == self.u[:16]
def authenticate_owner_password(self, password):
# Algorithm 3.7
password = (password + self.PASSWORD_PADDING)[:32]
hash = md5.md5(password)
if self.r >= 3:
for _ in range(50):
hash = md5.md5(hash.digest())
n = 5
if self.r >= 3:
n = self.length // 8
key = hash.digest()[:n]
if self.r == 2:
user_password = ARC4.new(key).decrypt(self.o)
else:
user_password = self.o
for i in range(19, -1, -1):
k = bytes( (c ^ i) for c in key )
user_password = ARC4.new(k).decrypt(user_password)
return self.authenticate_user_password(user_password)
def decrypt(self, objid, genno, data, attrs=None):
return self.decrypt_rc4(objid, genno, data)
def decrypt_rc4(self, objid, genno, data):
key = self.key + struct.pack('<L', objid)[:3] + struct.pack('<L', genno)[:2]
hash = md5.md5(key)
key = hash.digest()[:min(len(key), 16)]
return ARC4.new(key).decrypt(data)
|
PDFStandardSecurityHandler
|
python
|
MongoEngine__mongoengine
|
mongoengine/queryset/visitor.py
|
{
"start": 2505,
"end": 3502
}
|
class ____:
"""Base class for nodes in query trees."""
AND = 0
OR = 1
def to_query(self, document):
query = self.accept(SimplificationVisitor())
query = query.accept(QueryCompilerVisitor(document))
return query
def accept(self, visitor):
raise NotImplementedError
def _combine(self, other, operation):
"""Combine this node with another node into a QCombination
object.
"""
# If the other Q() is empty, ignore it and just use `self`.
if not bool(other):
return self
# Or if this Q is empty, ignore it and just use `other`.
if not bool(self):
return other
return QCombination(operation, [self, other])
@property
def empty(self):
warn_empty_is_deprecated()
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
|
QNode
|
python
|
tiangolo__fastapi
|
docs_src/schema_extra_example/tutorial005_py310.py
|
{
"start": 84,
"end": 1348
}
|
class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item = Body(
openapi_examples={
"normal": {
"summary": "A normal example",
"description": "A **normal** item works correctly.",
"value": {
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
},
"converted": {
"summary": "An example with converted data",
"description": "FastAPI can convert price `strings` to actual `numbers` automatically",
"value": {
"name": "Bar",
"price": "35.4",
},
},
"invalid": {
"summary": "Invalid data is rejected with an error",
"value": {
"name": "Baz",
"price": "thirty five point four",
},
},
},
),
):
results = {"item_id": item_id, "item": item}
return results
|
Item
|
python
|
Pylons__pyramid
|
src/pyramid/config/predicates.py
|
{
"start": 1699,
"end": 2704
}
|
class ____:
"""
You can invert the meaning of any predicate value by wrapping it in a call
to :class:`pyramid.config.not_`.
.. code-block:: python
:linenos:
from pyramid.config import not_
config.add_view(
'mypackage.views.my_view',
route_name='ok',
request_method=not_('POST')
)
The above example will ensure that the view is called if the request method
is *not* ``POST``, at least if no other view is more specific.
This technique of wrapping a predicate value in ``not_`` can be used
anywhere predicate values are accepted:
- :meth:`pyramid.config.Configurator.add_view`
- :meth:`pyramid.config.Configurator.add_route`
- :meth:`pyramid.config.Configurator.add_subscriber`
- :meth:`pyramid.view.view_config`
- :meth:`pyramid.events.subscriber`
.. versionadded:: 1.5
"""
def __init__(self, value):
self.value = value
# under = after
# over = before
|
not_
|
python
|
Textualize__textual
|
docs/examples/tutorial/stopwatch04.py
|
{
"start": 883,
"end": 1521
}
|
class ____(App):
"""A Textual app to manage stopwatches."""
CSS_PATH = "stopwatch04.tcss"
BINDINGS = [("d", "toggle_dark", "Toggle dark mode")]
def compose(self) -> ComposeResult:
"""Create child widgets for the app."""
yield Header()
yield Footer()
yield VerticalScroll(Stopwatch(), Stopwatch(), Stopwatch())
def action_toggle_dark(self) -> None:
"""An action to toggle dark mode."""
self.theme = (
"textual-dark" if self.theme == "textual-light" else "textual-light"
)
if __name__ == "__main__":
app = StopwatchApp()
app.run()
|
StopwatchApp
|
python
|
realpython__materials
|
django-view-auth/Blog/core/models.py
|
{
"start": 31,
"end": 134
}
|
class ____(models.Model):
title = models.CharField(max_length=50)
content = models.TextField()
|
Blog
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_kdtree.py
|
{
"start": 12673,
"end": 12814
}
|
class ____(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.d = 2.
@KDTreeTest
|
_Test_random_ball_far
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP044.py
|
{
"start": 126,
"end": 545
}
|
class ____(Generic[Unpack[Shape]]):
pass
def f(*args: Unpack[tuple[int, ...]]):
pass
def f(*args: Unpack[other.Type]):
pass
def f(*args: Generic[int, Unpack[int]]):
pass
# Valid syntax, but can't be unpacked.
def f(*args: Unpack[int | str]) -> None:
pass
def f(*args: Unpack[int and str]) -> None:
pass
def f(*args: Unpack[int > str]) -> None:
pass
from typing import TypedDict
|
D
|
python
|
docker__docker-py
|
tests/unit/models_resources_test.py
|
{
"start": 105,
"end": 873
}
|
class ____(unittest.TestCase):
def test_reload(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.attrs['Name'] = "oldname"
container.reload()
assert client.api.inspect_container.call_count == 2
assert container.attrs['Name'] == "foobar"
def test_hash(self):
client = make_fake_client()
container1 = client.containers.get(FAKE_CONTAINER_ID)
my_set = {container1}
assert len(my_set) == 1
container2 = client.containers.get(FAKE_CONTAINER_ID)
my_set.add(container2)
assert len(my_set) == 1
image1 = client.images.get(FAKE_CONTAINER_ID)
my_set.add(image1)
assert len(my_set) == 2
|
ModelTest
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/distlib/locators.py
|
{
"start": 17308,
"end": 20110
}
|
class ____(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
# urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
|
PyPIJSONLocator
|
python
|
realpython__materials
|
python-sqlite-sqlalchemy/project/examples/example_3/config.py
|
{
"start": 251,
"end": 712
}
|
class ____:
base_path = Path(__file__).resolve().parent.parent.parent
db_path = base_path / "data" / "chinook.db"
SECRET_KEY = os.getenv("SECRET_KEY")
SQLALCHEMY_DATABASE_URI = f"sqlite:///{str(db_path)}"
SQLALCHEMY_TRACK_MODIFICATIONS = json.loads(
os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS").lower()
)
SQLALCHEMY_ECHO = json.loads(os.getenv("SQLALCHEMY_ECHO").lower())
DEBUG = json.loads(os.getenv("DEBUG").lower())
|
Config
|
python
|
google__jax
|
jax/_src/custom_partitioning.py
|
{
"start": 9247,
"end": 28452
}
|
class ____:
"""Inserts a CustomCallOp into the XLA graph with custom SPMD lowering rules.
.. code-block:: python
@custom_partitioning
def f(*args):
return ...
def propagate_user_sharding(mesh, user_shape):
'''Update the sharding of the op from a user's shape.sharding.'''
user_sharding = jax.tree.map(lambda x: x.sharding, user_shape)
def partition(mesh, arg_shapes, result_shape):
def lower_fn(*args):
... builds computation on per-device shapes ...
result_shardings = jax.tree.map(lambda x: x.sharding, result_shape)
arg_shardings = jax.tree.map(lambda x: x.sharding, arg_shapes)
# result_sharding and arg_shardings may optionally be modified and the
# partitioner will insert collectives to reshape.
return mesh, lower_fn, result_sharding, arg_shardings
def infer_sharding_from_operands(mesh, arg_shapes, shape):
'''Compute the result sharding from the sharding of the operands.'''
arg_shardings = jax.tree.map(lambda x: x.sharding, arg_shapes)
f.def_partition(partition, propagate_user_sharding,
infer_sharding_from_operands=infer_sharding_from_operands,
sharding_rule='i j -> 'i j')
The args to ``def_partition`` are as follows:
* ``propagate_user_sharding``: Callable which takes the sharding of a user (in the dag)
and returns a suggestion for a new `NamedSharding`. The default value is None.
A trivial implementation is just to return the input sharding.
* ``partition``: Callable which takes the SPMD suggested partition shapes and
partition specs and returns the mesh, a per-shard lowering function, and the final
input and output sharding specs (the SPMD partitioner will repartition the
inputs to match). The mesh is returned to allow configuring axis_names for
collectives when no mesh is provided.
* ``infer_sharding_from_operands``: Callable which computes an output ``NamedSharding``
from the ``NamedSharding`` chosen for each argument.
* ``decode_shardings``: When set to True, convert input ``GSPMDSharding``s to
``NamedSharding`` if possible. This may not be possible if the user does not
provide a contextual mesh.
* ``sharding_rule``: an SdyShardingRule object, an Einsum-like notation string
that describes the sharding rule, or a Callable that produces either of
these. We call the index labels in Einsum notation factors in our sharding
rule. We borrow the idea from the einops.rearrange string , to use a space
separator between factors and allow multiple letters factor names. By
default, a factor corresponds to a passthrough/elementwise dimension.
Factors corresponding to other dimensions can be specified via keyword
arguments described below. See
`jax-shardy-guide <https://colab.sandbox.google.com/github/openxla/shardy/blob/main/docs/getting_started_jax.ipynb>`_
for more details and examples.
* ``reduction_factors``: A tuple of strings, specifying the reduction factors
for a string `sharding_rule`. A reduction factor corresponds to a dimension
that appears in operands but not in the result, such as the contracting
dimensions in a matmul operation. If a reduction factor is sharded, the
result would need to be all-reduced along the same axes.
* ``need_replication_factors``: A tuple of strings, specifying the
need_replication factors for a string `sharding_rule`. A need_replication
factor corresponds to a dimension that shouldn't be sharded to support
the implementation.
* ``permutation_factors``: A tuple of strings, specifying the permutation
factors for a string `sharding_rule`. A permutation factor corresponds to a
dimension that would trigger collective permute if it is sharded.
* ``factor_sizes``: A dictionary of variable keyword arguments, specifying
the sizes of the factors that are only used in compound factors in a string
`sharding_rule`.
When config.use_shardy_partitioner.value is True, `sharding_rule` is used;
otherwise, `propagate_user_sharding` and `infer_sharding_from_operands` are
used.
Positional arguments can be specified as static using static_argnums. JAX uses
:code:`inspect.signature(fun)` to resolve these positional arguments.
Examples:
As an example, assume we want to enhance the existing ``jax.numpy.fft.fft``. This function computes
the discrete Fourier transform of an N-dimensional input along the last dimension, and is batched
along the first N-1 dimensions.
By default, however, it will ignore the sharding of the input and gather the input on all devices.
However, since ``jax.numpy.fft.fft`` is batched along the first N-1 dimensions,
this is unnecessary. We will create a new ``my_fft`` op that, instead, does not alter the sharding
along the first `N-1` dimensions, and only gathers the input along the last dimension if needed.
.. code-block:: python
import jax
from jax.sharding import NamedSharding
from jax.experimental.custom_partitioning import custom_partitioning
from jax.experimental.pjit import pjit
from jax.sharding import PartitionSpec as P
from jax.sharding import Mesh
from jax.numpy.fft import fft
import regex as re
import numpy as np
# Pattern to detect all-gather or dynamic-slice in the generated HLO
_PATTERN = '(dynamic-slice|all-gather)'
# For an N-D input, keeps sharding along the first N-1 dimensions
# but replicate along the last dimension
def supported_sharding(sharding, shape):
rank = len(shape.shape)
max_shared_dims = min(len(sharding.spec), rank-1)
names = tuple(sharding.spec[:max_shared_dims]) + tuple(None for _ in range(rank - max_shared_dims))
return NamedSharding(sharding.mesh, P(*names))
def partition(mesh, arg_shapes, result_shape):
result_shardings = jax.tree.map(lambda x: x.sharding, result_shape)
arg_shardings = jax.tree.map(lambda x: x.sharding, arg_shapes)
return mesh, fft, \
supported_sharding(arg_shardings[0], arg_shapes[0]), \
(supported_sharding(arg_shardings[0], arg_shapes[0]),)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
arg_shardings = jax.tree.map(lambda x: x.sharding, arg_shapes)
return supported_sharding(arg_shardings[0], arg_shapes[0])
@custom_partitioning
def my_fft(x):
return fft(x)
# Use Einsum-like notation to specify the sharding rule.
my_fft.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule='...i -> ...i')
# Use SdyShardingRule object to specify the sharding rule.
my_fft.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule=SdyShardingRule(operand_mappings=((BATCHING, 'i'),), result_mappings=((BATCHING, 'i'),))))
Now create a 2D array sharded along the first axis, pass it through ``my_fft``
and notice how it is still sharded as expected, and identical to the output
of ``fft``. However, inspecting the HLO
(using ``lower(x).compile().runtime_executable().hlo_modules()``) reveals that
``my_fft`` does not create any all-gather or dynamic-slice, while ``fft`` does.
.. code-block::
with Mesh(np.array(jax.devices()), ('x',)):
x = np.asarray(np.random.randn(32*1024, 1024), dtype=np.complex64)
y = pjit(lambda x: x, in_shardings=None, out_shardings=P('x'))(x)
pjit_my_fft = pjit(my_fft, in_shardings=P('x'), out_shardings=P('x'))
pjit_fft = pjit(fft, in_shardings=P('x'), out_shardings=P('x'))
print(pjit_my_fft(y))
print(pjit_fft(y))
# dynamic-slice or all-gather are not present in the HLO for my_fft, because x is a 2D array
assert(re.search(_PATTERN, pjit_my_fft.lower(x).compile().runtime_executable().hlo_modules()[0].to_string()) is None)
# dynamic-slice or all-gather are present in the HLO for fft
assert(re.search(_PATTERN, pjit_fft.lower(x).compile().runtime_executable().hlo_modules()[0].to_string()) is not None)
.. code-block::
# my_fft
[[-38.840824 +0.j -40.649452 +11.845365j
...
-1.6937828 +0.8402481j 15.999859 -4.0156755j]]
# jax.numpy.fft.fft
[[-38.840824 +0.j -40.649452 +11.845365j
...
-1.6937828 +0.8402481j 15.999859 -4.0156755j]]
Because of the logic in ``supported_sharding``, ``my_fft`` also works on 1-dimensional arrays.
However, in this case, the HLO of ``my_fft`` does show a dynamic-slice, since the last dimension
is the dimension along which FFTs are calculated and needs to be replicated on all devices before
the computation can be done.
.. code-block::
with Mesh(np.array(jax.devices()), ('x',)):
x = np.asarray(np.random.randn(32*1024*1024), dtype=np.complex64)
y = pjit(lambda x: x, in_shardings=None, out_shardings=P('x'))(x)
pjit_my_fft = pjit(my_fft, in_shardings=P('x'), out_shardings=P('x'))
pjit_fft = pjit(fft, in_shardings=P('x'), out_shardings=P('x'))
print(pjit_my_fft(y))
print(pjit_fft(y))
# dynamic-slice or all-gather are present in the HLO for my_fft, because x is a 1D array
assert(re.search(_PATTERN, pjit_my_fft.lower(x).compile().runtime_executable().hlo_modules()[0].to_string()) is None)
# dynamic-slice or all-gather are present in the HLO for fft
assert(re.search(_PATTERN, pjit_fft.lower(x).compile().runtime_executable().hlo_modules()[0].to_string()) is not None)
.. code-block::
# my_fft
[ 7.217285 +0.j -3012.4937 +4287.635j -405.83594 +3042.984j
... 1422.4502 +7271.4297j -405.84033 -3042.983j
-3012.4963 -4287.6343j]
# jax.numpy.fft.fft
[ 7.217285 +0.j -3012.4937 +4287.635j -405.83594 +3042.984j
... 1422.4502 +7271.4297j -405.84033 -3042.983j
-3012.4963 -4287.6343j]
"""
def __init__(self, fun, static_argnums=()):
self.fun = fun
self.partition = None
self.static_argnums = static_argnums
self.propagate_user_sharding = None
self.infer_sharding_from_operands = None
self.sharding_rule = None
__getattr__: Any = custom_api_util.forward_attr
def def_partition(self, partition, infer_sharding_from_operands=None,
propagate_user_sharding=None, decode_shardings=True,
sharding_rule=None, *, reduction_factors=(),
need_replication_factors=(), permutation_factors=(),
**factor_sizes):
self.partition = partition
self.propagate_user_sharding = propagate_user_sharding
self.infer_sharding_from_operands = infer_sharding_from_operands
self.decode_shardings = decode_shardings
if (sharding_rule is None or isinstance(sharding_rule, Callable) or
isinstance(sharding_rule, SdyShardingRule)):
sharding_rule_dict = factor_sizes
if len(reduction_factors) > 0:
sharding_rule_dict["reduction_factors"] = reduction_factors
if len(need_replication_factors) > 0:
sharding_rule_dict["need_replication_factors"] = need_replication_factors
if len(permutation_factors) > 0:
sharding_rule_dict["permutation_factors"] = permutation_factors
if sharding_rule_dict:
raise ValueError(f"Unknown keyword arguments: {sharding_rule_dict}")
self.sharding_rule = sharding_rule
else:
self.sharding_rule = str_to_sdy_sharding_rule(
sharding_rule,
reduction_factors=reduction_factors,
need_replication_factors=need_replication_factors,
permutation_factors=permutation_factors,
**factor_sizes)
return partition
def __call__(self, *args, **kwargs):
args = _resolve_kwargs(self.fun, args, kwargs)
debug = api_util.debug_info("custom_partitioning", self.fun,
args, {},
static_argnums=self.static_argnums)
if self.static_argnums:
static_argnums = set(self.static_argnums)
args = tuple(x if i in static_argnums else x for i, x in enumerate(args))
dyn_argnums = [i for i in range(len(args)) if i not in static_argnums]
f_, dyn_args = api_util.argnums_partial(
lu.wrap_init(self.fun, debug_info=debug),
dyn_argnums,
args,
require_static_args_hashable=False,
)
static_args = tuple(args[i] for i in self.static_argnums)
_check_for_tracers(static_args)
else:
static_args = ()
f_, dyn_args = lu.wrap_init(self.fun, debug_info=debug), args
args_flat, in_tree = tree_util.tree_flatten(dyn_args)
flat_fun, out_tree = api_util.flatten_fun_nokwargs(f_, in_tree)
in_avals = [core.get_aval(x) for x in args_flat]
mesh = mesh_lib.thread_resources.env.physical_mesh
with core.extend_axis_env_nd(mesh.shape.items()):
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals)
assert not len(consts)
closed_call = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
propagate_user_sharding = None
infer_sharding_from_operands = None
sharding_rule = None
if config.use_shardy_partitioner.value:
if (self.sharding_rule is None and
(self.propagate_user_sharding is not None or
self.infer_sharding_from_operands is not None)):
raise NotImplementedError(
"Shardy is used, but sharding propagation callbacks instead of "
"sharding_rule are provided. Need to provide sharding_rule to "
"migrate to Shardy."
)
sharding_rule = self.sharding_rule
else:
propagate_user_sharding = self.propagate_user_sharding
infer_sharding_from_operands = self.infer_sharding_from_operands
out_flat = custom_partitioning_p.bind(
*consts,
*args_flat,
call=closed_call,
partition=self.partition,
propagate_user_sharding=propagate_user_sharding,
infer_sharding_from_operands=infer_sharding_from_operands,
decode_shardings=self.decode_shardings,
sharding_rule=sharding_rule,
in_tree=in_tree,
out_tree=out_tree(),
static_args=static_args
)
return tree_util.tree_unflatten(out_tree(), out_flat)
def _custom_partitioning_lowering_rule(ctx: mlir.LoweringRuleContext, *values,
call, in_tree, out_tree,
propagate_user_sharding, partition,
infer_sharding_from_operands,
decode_shardings,
sharding_rule,
static_args):
axis_context = ctx.module_context.axis_context
if (isinstance(axis_context, sharding_impls.SPMDAxisContext) and
set(axis_context.manual_axes) == set(axis_context.mesh.axis_names)):
return mlir.lower_fun(core.jaxpr_as_fun(call), multiple_results=True)(ctx, *values)
mesh = mesh_lib.thread_resources.env.physical_mesh
if isinstance(axis_context, sharding_impls.ShardingContext):
devices = axis_context.device_assignment
if devices is None:
raise AssertionError(
'Please file a bug at https://github.com/jax-ml/jax/issues')
am = axis_context.abstract_mesh
if am is not None:
mesh = mesh_lib.Mesh(np.array(devices).reshape(am.axis_sizes),
am.axis_names)
elif isinstance(axis_context, sharding_impls.SPMDAxisContext):
devices = axis_context.mesh._flat_devices_tuple
else:
devices = None
if not devices or len(devices) == 1:
return mlir.lower_fun(
core.jaxpr_as_fun(call), multiple_results=True)(ctx, *values)
def to_mesh_pspec_sharding(hlo_sharding: xc.HloSharding | None, ndim):
if hlo_sharding is None:
return hlo_sharding
if mesh.empty or not decode_shardings:
assert devices is not None
return sharding_impls.GSPMDSharding(devices, hlo_sharding)
pspec = sharding_impls.parse_flatten_op_sharding(
hlo_sharding, mesh)[0]
pspec = sharding_impls.PartitionSpec(*pspec, *((None,) * (ndim - len(pspec))))
return sharding_impls.NamedSharding(mesh, pspec)
sharding_callback_info = _ShardingCallbackInfo(propagate_user_sharding,
partition, to_mesh_pspec_sharding, in_tree, out_tree,
infer_sharding_from_operands, ctx.module_context, mesh, static_args)
key = str(id(sharding_callback_info))
_sharding_callbacks[bytes(key, 'utf8')] = sharding_callback_info
# We need to make sure `sharding_callback_info` is still alive when the SPMD
# partitioner runs so we keep it alive by attaching it to the executable.
ctx.module_context.add_keepalive(sharding_callback_info)
result_types = [mlir.aval_to_ir_type(s) for s in call.out_avals]
out = hlo.CustomCallOp(
result_types,
list(values),
call_target_name=ir.StringAttr.get(_CUSTOM_PARTITIONING_CALL_NAME),
has_side_effect=ir.BoolAttr.get(False),
api_version=mlir.i32_attr(2),
called_computations=ir.ArrayAttr.get([]),
backend_config=ir.StringAttr.get(key),
operand_layouts=None,
result_layouts=None)
if sharding_rule is not None:
value_types = [mlir.aval_to_ir_type(s) for s in call.in_avals]
if callable(sharding_rule):
sharding_rule = sharding_rule(*static_args, mesh, value_types, result_types)
if isinstance(sharding_rule, (list, tuple)) and len(sharding_rule) == 2:
sharding_rule, sharding_rule_dict = sharding_rule
else:
sharding_rule_dict = {}
if isinstance(sharding_rule, str):
sharding_rule = str_to_sdy_sharding_rule(sharding_rule, **sharding_rule_dict)
elif not isinstance(sharding_rule, SdyShardingRule):
raise ValueError("sharding_rule callable must produce either an "
"SdyShardingRule object or an Einsum-like notation "
"string.")
out.attributes['sdy.sharding_rule'] = sdy_sharding_rule_to_mlir(
sharding_rule, value_types, result_types)
return out.results
mlir.register_lowering(custom_partitioning_p,
_custom_partitioning_lowering_rule)
xc.register_custom_call_partitioner(
_CUSTOM_PARTITIONING_CALL_NAME,
_custom_partitioning_propagate_user_sharding,
_custom_partitioning_partition,
_custom_partitioning_infer_sharding_from_operands,
can_side_effecting_have_replicated_sharding=True,
)
xb.register_plugin_callbacks(
partial(
xc.register_custom_call_partitioner,
name=_CUSTOM_PARTITIONING_CALL_NAME,
prop_user_sharding=_custom_partitioning_propagate_user_sharding,
partition=_custom_partitioning_partition,
infer_sharding_from_operands=_custom_partitioning_infer_sharding_from_operands,
can_side_effecting_have_replicated_sharding=True,
)
)
|
custom_partitioning
|
python
|
ray-project__ray
|
release/nightly_tests/multimodal_inference_benchmarks/video_object_detection/daft_main.py
|
{
"start": 922,
"end": 2202
}
|
class ____:
def __init__(self):
self.model = YOLO(YOLO_MODEL)
if torch.cuda.is_available():
self.model.to("cuda")
def to_features(self, res):
return [
{
"label": label,
"confidence": confidence.item(),
"bbox": bbox.tolist(),
}
for label, confidence, bbox in zip(
res.names, res.boxes.conf, res.boxes.xyxy
)
]
def __call__(self, images):
if len(images) == 0:
return []
batch = [
torchvision.transforms.functional.to_tensor(Image.fromarray(image))
for image in images
]
stack = torch.stack(batch, dim=0)
return daft.Series.from_pylist(
[self.to_features(res) for res in self.model(stack)]
)
daft.context.set_runner_ray()
df = daft.read_video_frames(
INPUT_PATH,
image_height=IMAGE_HEIGHT,
image_width=IMAGE_WIDTH,
)
df = df.with_column("features", ExtractImageFeatures(col("data")))
df = df.explode("features")
df = df.with_column(
"object",
daft.col("data").image.crop(daft.col("features")["bbox"]).image.encode("png"),
)
df = df.exclude("data")
df.write_parquet(OUTPUT_PATH)
|
ExtractImageFeatures
|
python
|
pytorch__pytorch
|
test/distributed/launcher/launch_test.py
|
{
"start": 651,
"end": 2892
}
|
class ____(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_without_env(self):
nnodes = 1
nproc_per_node = 4
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--monitor-interval=1",
"--start-method=spawn",
"--master-addr=localhost",
f"--master-port={master_port}",
"--node-rank=0",
path("bin/test_script_local_rank.py"),
]
launch.main(args)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_with_env(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--monitor-interval=1",
"--start-method=spawn",
"--master-addr=localhost",
f"--master-port={master_port}",
"--node-rank=0",
"--use-env",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
if __name__ == "__main__":
raise RuntimeError(
"This test is not currently used and should be "
"enabled in discover_tests.py if required."
)
|
LaunchTest
|
python
|
sympy__sympy
|
sympy/physics/biomechanics/activation.py
|
{
"start": 7011,
"end": 14269
}
|
class ____(ActivationBase):
"""Simple zeroth-order activation dynamics mapping excitation to
activation.
Explanation
===========
Zeroth-order activation dynamics are useful in instances where you want to
reduce the complexity of your musculotendon dynamics as they simple map
exictation to activation. As a result, no additional state equations are
introduced to your system. They also remove a potential source of delay
between the input and dynamics of your system as no (ordinary) differential
equations are involved.
"""
def __init__(self, name):
"""Initializer for ``ZerothOrderActivation``.
Parameters
==========
name : str
The name identifier associated with the instance. Must be a string
of length at least 1.
"""
super().__init__(name)
# Zeroth-order activation dynamics has activation equal excitation so
# overwrite the symbol for activation with the excitation symbol.
self._a = self._e
@classmethod
def with_defaults(cls, name):
"""Alternate constructor that provides recommended defaults for
constants.
Explanation
===========
As this concrete class doesn't implement any constants associated with
its dynamics, this ``classmethod`` simply creates a standard instance
of ``ZerothOrderActivation``. An implementation is provided to ensure
a consistent interface between all ``ActivationBase`` concrete classes.
"""
return cls(name)
@property
def order(self):
"""Order of the (differential) equation governing activation."""
return 0
@property
def state_vars(self):
"""Ordered column matrix of functions of time that represent the state
variables.
Explanation
===========
As zeroth-order activation dynamics simply maps excitation to
activation, this class has no associated state variables and so this
property return an empty column ``Matrix`` with shape (0, 1).
The alias ``x`` can also be used to access the same attribute.
"""
return zeros(0, 1)
@property
def x(self):
"""Ordered column matrix of functions of time that represent the state
variables.
Explanation
===========
As zeroth-order activation dynamics simply maps excitation to
activation, this class has no associated state variables and so this
property return an empty column ``Matrix`` with shape (0, 1).
The alias ``state_vars`` can also be used to access the same attribute.
"""
return zeros(0, 1)
@property
def input_vars(self):
"""Ordered column matrix of functions of time that represent the input
variables.
Explanation
===========
Excitation is the only input in zeroth-order activation dynamics and so
this property returns a column ``Matrix`` with one entry, ``e``, and
shape (1, 1).
The alias ``r`` can also be used to access the same attribute.
"""
return Matrix([self._e])
@property
def r(self):
"""Ordered column matrix of functions of time that represent the input
variables.
Explanation
===========
Excitation is the only input in zeroth-order activation dynamics and so
this property returns a column ``Matrix`` with one entry, ``e``, and
shape (1, 1).
The alias ``input_vars`` can also be used to access the same attribute.
"""
return Matrix([self._e])
@property
def constants(self):
"""Ordered column matrix of non-time varying symbols present in ``M``
and ``F``.
Only symbolic constants are returned. If a numeric type (e.g. ``Float``)
has been used instead of ``Symbol`` for a constant then that attribute
will not be included in the matrix returned by this property. This is
because the primary use of this property attribute is to provide an
ordered sequence of the still-free symbols that require numeric values
during code generation.
Explanation
===========
As zeroth-order activation dynamics simply maps excitation to
activation, this class has no associated constants and so this property
return an empty column ``Matrix`` with shape (0, 1).
The alias ``p`` can also be used to access the same attribute.
"""
return zeros(0, 1)
@property
def p(self):
"""Ordered column matrix of non-time varying symbols present in ``M``
and ``F``.
Only symbolic constants are returned. If a numeric type (e.g. ``Float``)
has been used instead of ``Symbol`` for a constant then that attribute
will not be included in the matrix returned by this property. This is
because the primary use of this property attribute is to provide an
ordered sequence of the still-free symbols that require numeric values
during code generation.
Explanation
===========
As zeroth-order activation dynamics simply maps excitation to
activation, this class has no associated constants and so this property
return an empty column ``Matrix`` with shape (0, 1).
The alias ``constants`` can also be used to access the same attribute.
"""
return zeros(0, 1)
@property
def M(self):
"""Ordered square matrix of coefficients on the LHS of ``M x' = F``.
Explanation
===========
The square matrix that forms part of the LHS of the linear system of
ordinary differential equations governing the activation dynamics:
``M(x, r, t, p) x' = F(x, r, t, p)``.
As zeroth-order activation dynamics have no state variables, this
linear system has dimension 0 and therefore ``M`` is an empty square
``Matrix`` with shape (0, 0).
"""
return Matrix([])
@property
def F(self):
"""Ordered column matrix of equations on the RHS of ``M x' = F``.
Explanation
===========
The column matrix that forms the RHS of the linear system of ordinary
differential equations governing the activation dynamics:
``M(x, r, t, p) x' = F(x, r, t, p)``.
As zeroth-order activation dynamics have no state variables, this
linear system has dimension 0 and therefore ``F`` is an empty column
``Matrix`` with shape (0, 1).
"""
return zeros(0, 1)
def rhs(self):
"""Ordered column matrix of equations for the solution of ``M x' = F``.
Explanation
===========
The solution to the linear system of ordinary differential equations
governing the activation dynamics:
``M(x, r, t, p) x' = F(x, r, t, p)``.
As zeroth-order activation dynamics have no state variables, this
linear has dimension 0 and therefore this method returns an empty
column ``Matrix`` with shape (0, 1).
"""
return zeros(0, 1)
|
ZerothOrderActivation
|
python
|
apache__airflow
|
providers/openlineage/src/airflow/providers/openlineage/utils/utils.py
|
{
"start": 29744,
"end": 30340
}
|
class ____(InfoJsonEncodable):
"""Defines encoding TaskInstance object to JSON."""
includes = ["duration", "try_number", "pool", "queued_dttm", "log_url"]
casts = {
"log_url": lambda ti: getattr(ti, "log_url", None),
"map_index": lambda ti: ti.map_index if getattr(ti, "map_index", -1) != -1 else None,
"dag_bundle_version": lambda ti: (
ti.bundle_instance.version if hasattr(ti, "bundle_instance") else None
),
"dag_bundle_name": lambda ti: ti.bundle_instance.name if hasattr(ti, "bundle_instance") else None,
}
|
TaskInstanceInfo
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/elements.py
|
{
"start": 120031,
"end": 120717
}
|
class ____(ColumnElement[int]):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = "extract"
_traverse_internals: _TraverseInternalsType = [
("expr", InternalTraversal.dp_clauseelement),
("field", InternalTraversal.dp_string),
]
expr: ColumnElement[Any]
field: str
def __init__(self, field: str, expr: _ColumnExpressionArgument[Any]):
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = coercions.expect(roles.ExpressionElementRole, expr)
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return self.expr._from_objects
|
Extract
|
python
|
walkccc__LeetCode
|
solutions/1040. Moving Stones Until Consecutive II/1040.py
|
{
"start": 0,
"end": 521
}
|
class ____:
def numMovesStonesII(self, stones: list[int]) -> list[int]:
n = len(stones)
minMoves = n
stones.sort()
l = 0
for r, stone in enumerate(stones):
while stone - stones[l] + 1 > n:
l += 1
alreadyStored = r - l + 1
if alreadyStored == n - 1 and stone - stones[l] + 1 == n - 1:
minMoves = 2
else:
minMoves = min(minMoves, n - alreadyStored)
return [minMoves, max(stones[n - 1] - stones[1] - n + 2, stones[n - 2] - stones[0] - n + 2)]
|
Solution
|
python
|
sympy__sympy
|
sympy/codegen/fnodes.py
|
{
"start": 17784,
"end": 18205
}
|
class ____(Function):
_required_standard = 77
def _fcode(self, printer):
name = self.__class__.__name__
if printer._settings['standard'] < self._required_standard:
raise NotImplementedError("%s requires Fortran %d or newer" %
(name, self._required_standard))
return '{}({})'.format(name, ', '.join(map(printer._print, self.args)))
|
FFunction
|
python
|
paramiko__paramiko
|
demos/forward.py
|
{
"start": 1394,
"end": 1507
}
|
class ____(SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
|
ForwardServer
|
python
|
tqdm__tqdm
|
tqdm/contrib/telegram.py
|
{
"start": 544,
"end": 2815
}
|
class ____(MonoWorker):
"""Non-blocking file-like IO using a Telegram Bot."""
API = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""Creates a new message in the given `chat_id`."""
super().__init__()
self.token = token
self.chat_id = chat_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
self.API + '%s/sendMessage' % self.token,
data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
'parse_mode': 'MarkdownV2'}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.",
TqdmWarning, stacklevel=2)
else:
self._message_id = res['result']['message_id']
return self._message_id
def write(self, s):
"""Replaces internal `message_id`'s text with `s`."""
if not s:
s = "..."
s = s.replace('\r', '').strip()
if s == self.text:
return # avoid duplicate message Bot error
message_id = self.message_id
if message_id is None:
return
self.text = s
try:
future = self.submit(
self.session.post, self.API + '%s/editMessageText' % self.token,
data={'text': '`' + s + '`', 'chat_id': self.chat_id,
'message_id': message_id, 'parse_mode': 'MarkdownV2'})
except Exception as e:
tqdm_auto.write(str(e))
else:
return future
def delete(self):
"""Deletes internal `message_id`."""
try:
future = self.submit(
self.session.post, self.API + '%s/deleteMessage' % self.token,
data={'chat_id': self.chat_id, 'message_id': self.message_id})
except Exception as e:
tqdm_auto.write(str(e))
else:
return future
|
TelegramIO
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/pipes/utils.py
|
{
"start": 10154,
"end": 21442
}
|
class ____(PipesMessageReader):
"""A base class for message readers that read messages and logs in background threads.
Args:
interval (float): The interval in seconds at which to poll for messages.
log_readers (Optional[Sequence[PipesLogReader]]): A set of log readers to use to read logs.
"""
interval: float
log_readers: dict[str, "PipesLogReader"]
opened_payload: Optional[PipesOpenedData]
launched_payload: Optional[PipesLaunchedData]
def __init__(
self,
interval: float = 10,
log_readers: Optional[Sequence["PipesLogReader"]] = None,
):
self.interval = interval
self.log_readers = {
str(i): reader
for i, reader in enumerate(
check.opt_sequence_param(log_readers, "log_readers", of_type=PipesLogReader)
)
}
self.opened_payload = None
self.launched_payload = None
@contextmanager
def read_messages(
self,
handler: "PipesMessageHandler",
) -> Iterator[PipesParams]:
"""Set up a thread to read streaming messages by periodically reading message chunks from a
target location.
Args:
handler (PipesMessageHandler): object to process incoming messages
Yields:
PipesParams: A dict of parameters that specifies where a pipes process should write
pipes protocol message chunks.
"""
with self.get_params() as params:
is_session_closed = Event()
messages_thread = None
logs_thread = None
try:
messages_thread = Thread(
target=self._messages_thread,
args=(handler, params, is_session_closed),
daemon=True,
)
messages_thread.start()
logs_thread = Thread(
target=self._logs_thread,
args=(handler, params, is_session_closed, messages_thread),
daemon=True,
)
logs_thread.start()
yield params
finally:
is_session_closed.set()
if messages_thread:
_join_thread(messages_thread, "messages")
if logs_thread:
_join_thread(logs_thread, "logs")
def on_opened(self, opened_payload: PipesOpenedData) -> None:
self.opened_payload = opened_payload
def on_launched(self, launched_payload: PipesLaunchedData) -> None:
self.launched_payload = launched_payload
def add_log_reader(self, log_reader: "PipesLogReader") -> None:
"""Can be used to attach extra log readers to the message reader.
Typically called when the target for reading logs is not known until after the external
process has started (for example, when the target depends on an external job_id).
The LogReader will be eventually started by the PipesThreadedMessageReader.
"""
key = str(len(self.log_readers))
self.log_readers[key] = log_reader
@abstractmethod
def messages_are_readable(self, params: PipesParams) -> bool: ...
@abstractmethod
@contextmanager
def get_params(self) -> Iterator[PipesParams]:
"""Yield a set of parameters to be passed to a message writer in a pipes process.
Yields:
PipesParams: A dict of parameters that specifies where a pipes process should write
pipes protocol message chunks.
"""
@abstractmethod
def download_messages(
self, cursor: Optional[TCursor], params: PipesParams
) -> Optional[tuple[TCursor, str]]:
"""Download a chunk of messages from the target location.
Args:
cursor (Optional[Any]): Cursor specifying start location from which to download
messages in a stream. The format of the value varies with the message reader
implementation. It might be an integer index for a line in a log file, or a
timestamp for a message in a time-indexed stream.
params (PipesParams): A dict of parameters that specifies where to download messages from.
"""
...
def _messages_thread(
self,
handler: "PipesMessageHandler",
params: PipesParams,
is_session_closed: Event,
) -> None:
try:
start_or_last_download = datetime.datetime.now()
session_closed_at = None
cursor = None
can_read_messages = False
# main loop to read messages
# at every step, we:
# - exit early if we have received the closed message
# - consume params from the launched_payload if possible
# - check if we can start reading messages (e.g. log files are available)
# - download a chunk of messages and process them
# - if is_session_closed is set, we exit the loop after waiting for WAIT_FOR_LOGS_AFTER_EXECUTION_INTERVAL
while True:
# if we have the closed message, we can exit
# since the message reader has been started and the external process has completed
if handler.received_closed_message:
return
if not can_read_messages: # this branch will be executed until we can read messages
# check for new params in case they have been updated
params = {**params, **(self.launched_payload or {})}
can_read_messages = self.messages_are_readable(params)
now = datetime.datetime.now()
if (
now - start_or_last_download
).seconds > self.interval or is_session_closed.is_set():
if can_read_messages:
start_or_last_download = now
result = self.download_messages(cursor, params)
if result is not None:
cursor, chunk = result
for line in chunk.split("\n"):
try:
message = json.loads(line)
if PIPES_PROTOCOL_VERSION_FIELD in message.keys():
handler.handle_message(message)
except json.JSONDecodeError:
pass
time.sleep(DEFAULT_SLEEP_INTERVAL)
if is_session_closed.is_set():
if session_closed_at is None:
session_closed_at = datetime.datetime.now()
# After the external process has completed, we don't want to immediately exit
if (
datetime.datetime.now() - session_closed_at
).seconds > WAIT_FOR_LOGS_AFTER_EXECUTION_INTERVAL:
if not can_read_messages:
self._log_unstartable_warning(handler, params)
return
except:
handler.report_pipes_framework_exception(
f"{self.__class__.__name__} messages thread",
sys.exc_info(),
)
raise
def _log_unstartable_warning(self, handler: PipesMessageHandler, params: PipesParams) -> None:
if self.launched_payload is not None:
handler._context.log.warning( # noqa: SLF001
f"[pipes] Target of {self.__class__.__name__} is not readable after receiving extra params from the external process (`on_launched` has been called)"
)
else:
handler._context.log.warning( # noqa: SLF001
f"[pipes] Target of {self.__class__.__name__} is not readable."
)
def _logs_thread(
    self,
    handler: "PipesMessageHandler",
    params: PipesParams,
    is_session_closed: Event,
    messages_thread: Thread,
) -> None:
    """Background-thread body: start each configured log reader once its target is readable.

    Runs until the session closes and all readers have either started or been
    abandoned after ``WAIT_FOR_LOGS_TIMEOUT`` seconds. Any reader still running
    at exit is stopped in the ``finally`` block.

    Args:
        handler: Receives framework-level exception reports on failure.
        params: Bootstrap params; merged with ``self.opened_payload`` when set.
        is_session_closed: Event set once the external process has completed.
        messages_thread: Sibling messages thread (not used in this body).
    """
    # Start of the WAIT_FOR_LOGS_TIMEOUT countdown; set when the session is
    # first observed closed.
    wait_for_logs_start = None
    # Loop over all log readers and start them if the target is readable, which typically means
    # a file exists at the target location. Different execution environments may write logs at
    # different times (e.g., some may write logs periodically during execution, while others may
    # only write logs after the process has completed).
    try:
        unstarted_log_readers = {**self.log_readers}
        while True:
            # Refresh params with any payload delivered by the "opened" message.
            if self.opened_payload is not None:
                params = {**params, **self.opened_payload}
            # periodically check for new readers which may be added after the
            # external process has started and add them to the unstarted log readers
            # NOTE(review): readers popped below (once started) remain in
            # self.log_readers and are re-added here on the next pass — confirm
            # whether started readers should be tracked to avoid restarting them.
            for key in self.log_readers:
                if key not in unstarted_log_readers:
                    unstarted_log_readers[key] = self.log_readers[key]
            # Copy the key list so readers can be popped while iterating.
            for key in list(unstarted_log_readers.keys()).copy():
                if unstarted_log_readers[key].target_is_readable(params):
                    reader = unstarted_log_readers.pop(key)
                    reader.start(params, is_session_closed)
            # In some cases logs might not be written out until after the external process has
            # exited. That will leave us in this state, where some log readers have not been
            # started even though the external process is finished. We start a timer and wait
            # for up to WAIT_FOR_LOGS_TIMEOUT seconds for the logs to be written. If they are
            # not written after this amount of time has elapsed, we warn the user and bail.
            if is_session_closed.is_set():
                if wait_for_logs_start is None:
                    wait_for_logs_start = datetime.datetime.now()
                if not unstarted_log_readers:
                    return
                elif (
                    unstarted_log_readers
                    and (datetime.datetime.now() - wait_for_logs_start).seconds
                    > WAIT_FOR_LOGS_TIMEOUT
                ):
                    # Timed out: warn once per abandoned reader, then exit.
                    for key, log_reader in unstarted_log_readers.items():
                        warnings.warn(
                            log_reader.with_debug_info(
                                f"[pipes] Attempted to read log for reader {log_reader.name} but log was"
                                f" still not written {WAIT_FOR_LOGS_TIMEOUT} seconds after session close. Abandoning reader {key}."
                            )
                        )
                    return
            time.sleep(DEFAULT_SLEEP_INTERVAL)
    except Exception:
        # Surface thread failures through the pipes handler before re-raising.
        handler.report_pipes_framework_exception(
            f"{self.__class__.__name__} logs thread",
            sys.exc_info(),
        )
        raise
    finally:
        # Always stop readers that were started, even on error/early return.
        for log_reader in self.log_readers.values():
            if log_reader.is_running():
                log_reader.stop()
@public
|
PipesThreadedMessageReader
|
python
|
pydantic__pydantic
|
tests/test_docs.py
|
{
"start": 1580,
"end": 10409
}
|
class ____(datetime):
    """``datetime`` stand-in whose ``now`` is frozen at 2032-01-02 03:04:05.000006."""

    @classmethod
    def now(cls, *args, tz=None, **kwargs):
        """Ignore the real clock; return the pinned instant localized to *tz*."""
        frozen = datetime(2032, 1, 2, 3, 4, 5, 6, tzinfo=tz)
        return frozen
# Reason string (or falsy) from the project helper; truthy => docs tests are skipped.
skip_reason = skip_docs_tests()
# Formatter/linter settings applied to every documentation example.
LINE_LENGTH = 80
TARGET_VERSION = 'py39'
def print_callback(print_statement: str) -> str:
    """Normalize pydantic error URLs so printed example output is version-independent.

    Any ``https://errors.pydantic.dev/<version>/`` prefix is rewritten to use
    the fixed major version ``2``.
    """
    pattern = r'(https://errors.pydantic.dev)/.+?/'
    replacement = r'\1/2/'
    return re.sub(pattern, replacement, print_statement)
def run_example(example: CodeExample, eval_example: EvalExample, mocker: Any) -> None:  # noqa C901
    """Lint and/or execute one documentation code example.

    Honors the example's prefix settings: ``test=skip`` / ``lint=skip``,
    ``requires=<major.minor>`` python-version gates, ``group=<name>`` shared
    globals, ``xfail`` markers, and print-output checking/updating.
    """
    eval_example.print_callback = print_callback

    prefix_settings = example.prefix_settings()
    test_settings = prefix_settings.get('test', '')
    lint_settings = prefix_settings.get('lint', '')
    if test_settings.startswith('skip') and lint_settings.startswith('skip'):
        pytest.skip('both running code and lint skipped')

    # Gate on a minimum python version, e.g. ``requires="3.10"``.
    requires_settings = prefix_settings.get('requires')
    if requires_settings:
        major, minor = map(int, requires_settings.split('.'))
        if sys.version_info < (major, minor):
            pytest.skip(f'requires python {requires_settings}')

    group_name = prefix_settings.get('group')

    eval_example.set_config(
        ruff_ignore=['D', 'T', 'B', 'C4', 'E721', 'Q001', 'PERF', 'PIE790'],
        line_length=LINE_LENGTH,
        target_version=TARGET_VERSION,
    )
    # ``# ignore-above`` examples intentionally place imports late -> allow E402.
    if '# ignore-above' in example.source:
        eval_example.set_config(
            ruff_ignore=eval_example.config.ruff_ignore + ['E402'],
            line_length=LINE_LENGTH,
            target_version=TARGET_VERSION,
        )
    # Grouped examples share globals, so a snippet may use names "undefined"
    # within itself -> allow F821.
    if group_name:
        eval_example.set_config(
            ruff_ignore=eval_example.config.ruff_ignore + ['F821'],
            line_length=LINE_LENGTH,
            target_version=TARGET_VERSION,
        )

    if not lint_settings.startswith('skip'):
        if eval_example.update_examples:
            eval_example.format(example)
        else:
            if example.in_py_file():
                # Ignore isort as double newlines will cause it to fail, but we remove them in py files
                eval_example.set_config(
                    ruff_ignore=eval_example.config.ruff_ignore + ['I001'],
                    line_length=LINE_LENGTH,
                    target_version=TARGET_VERSION,
                )
            eval_example.lint(example)

    if test_settings.startswith('skip'):
        pytest.skip(test_settings[4:].lstrip(' -') or 'running code skipped')

    # NOTE: group_name was already fetched above; this re-fetch is redundant but harmless.
    group_name = prefix_settings.get('group')
    d = group_globals.get(group_name)

    # Pin time and randomness so printed example output is reproducible.
    mocker.patch('datetime.datetime', MockedDatetime)
    mocker.patch('random.randint', return_value=3)

    xfail = None
    if test_settings.startswith('xfail'):
        xfail = test_settings[5:].lstrip(' -')

    rewrite_assertions = prefix_settings.get('rewrite_assert', 'true') == 'true'

    try:
        if test_settings == 'no-print-intercept':
            d2 = eval_example.run(example, module_globals=d, rewrite_assertions=rewrite_assertions)
        elif eval_example.update_examples:
            d2 = eval_example.run_print_update(example, module_globals=d, rewrite_assertions=rewrite_assertions)
        else:
            d2 = eval_example.run_print_check(example, module_globals=d, rewrite_assertions=rewrite_assertions)
    except BaseException as e:  # run_print_check raises a BaseException
        if xfail:
            pytest.xfail(f'{xfail}, {type(e).__name__}: {e}')
        raise
    else:
        if xfail:
            pytest.fail('expected xfail')

    # Persist the example's globals so later members of the group can use them.
    group_globals.set(group_name, d2)
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings('ignore:(parse_obj_as|schema_json_of|schema_of) is deprecated.*:DeprecationWarning')
@pytest.mark.skipif(bool(skip_reason), reason=skip_reason or 'not skipping')
@pytest.mark.parametrize('example', find_examples(str(SOURCES_ROOT), skip=sys.platform == 'win32'), ids=str)
def test_docstrings_examples(example: CodeExample, eval_example: EvalExample, tmp_path: Path, mocker):
    """Lint/run every code example embedded in library docstrings (v1 tree excluded)."""
    if str(example.path).startswith(str(SOURCES_ROOT / 'v1')):
        pytest.skip('skip v1 examples')
    run_example(example, eval_example, mocker)
@pytest.fixture(scope='module', autouse=True)
def set_cwd():
    """Run this module's tests from the docs parent directory, restoring cwd afterwards."""
    # `test_docs_examples` needs to be run from this folder or relative paths
    # will be wrong and some tests fail.
    original_cwd = os.getcwd()
    os.chdir(str(DOCS_ROOT.parent))
    try:
        yield
    finally:
        os.chdir(original_cwd)
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings('ignore:(parse_obj_as|schema_json_of|schema_of) is deprecated.*:DeprecationWarning')
@pytest.mark.skipif(bool(skip_reason), reason=skip_reason or 'not skipping')
@pytest.mark.parametrize('example', find_examples(str(DOCS_ROOT), skip=sys.platform == 'win32'), ids=str)
def test_docs_examples(example: CodeExample, eval_example: EvalExample, tmp_path: Path, mocker):
    """Lint/run every code example in the markdown docs.

    The first ``index.md`` example is cached in the module-global INDEX_MAIN;
    later index examples can import it as ``index_main`` from a temp dir.
    """
    global INDEX_MAIN
    if example.path.name == 'index.md':
        if INDEX_MAIN is None:
            INDEX_MAIN = example.source
        else:
            # Make the cached first example importable by subsequent ones.
            (tmp_path / 'index_main.py').write_text(INDEX_MAIN)
            sys.path.append(str(tmp_path))

    # devtools examples need a subprocess; handled by test_docs_devtools_example.
    if example.path.name == 'devtools.md':
        pytest.skip('tested below')

    run_example(example, eval_example, mocker)
@pytest.mark.thread_unsafe
@pytest.mark.skipif(bool(skip_reason), reason=skip_reason or 'not skipping')
@pytest.mark.skipif(sys.version_info >= (3, 13), reason='python-devtools does not yet support python 3.13')
@pytest.mark.parametrize(
    'example', find_examples(str(DOCS_ROOT / 'integrations/devtools.md'), skip=sys.platform == 'win32'), ids=str
)
def test_docs_devtools_example(example: CodeExample, eval_example: EvalExample, tmp_path: Path):
    """Run the devtools doc example in a subprocess and compare (or update) its HTML output."""
    from ansi2html import Ansi2HTMLConverter

    eval_example.set_config(ruff_ignore=['D', 'T', 'B', 'C4'], line_length=LINE_LENGTH, target_version=TARGET_VERSION)

    if eval_example.update_examples:
        eval_example.format(example)
    else:
        eval_example.lint(example)

    with NamedTemporaryFile(mode='w', suffix='.py') as f:
        f.write(example.source)
        f.flush()
        # Force devtools' ANSI highlighting even without a TTY.
        os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'
        p = subprocess.run((sys.executable, f.name), stdout=subprocess.PIPE, check=True, encoding='utf8')

        conv = Ansi2HTMLConverter()

        # replace ugly file path with "devtools_example.py"
        output = re.sub(r'/.+?\.py', 'devtools_example.py', p.stdout)
        output_html = conv.convert(output, full=False)
    output_html = (
        '<!-- DO NOT EDIT MANUALLY: '
        'Generated by tests/test_docs.py::test_docs_devtools_example for use in docs -->\n'
        f'{output_html}'
    )
    output_file = DOCS_ROOT / 'plugins/devtools_output.html'

    if eval_example.update_examples:
        output_file.write_text(output_html)
    elif not output_file.exists():
        pytest.fail(f'output file {output_file} does not exist')
    else:
        assert output_html == output_file.read_text()
def test_error_codes():
    """Every ``PydanticErrorCodes`` member must appear as a doc heading anchor, in order."""
    docs_text = (DOCS_ROOT / 'errors/usage_errors.md').read_text()
    heading_anchor = r'^## .+ \{#(.+?)}$'
    documented = tuple(re.findall(heading_anchor, docs_text, flags=re.MULTILINE))
    assert PydanticErrorCodes.__args__ == documented, 'Error codes in code and docs do not match'
def test_validation_error_codes():
    """Check validation-error docs: every section is an ErrorType member, appears in
    sorted order, and prints its own error code in its example output."""
    error_text = (DOCS_ROOT / 'errors/validation_errors.md').read_text()
    expected_validation_error_codes = set(core_schema.ErrorType.__args__)
    # Remove codes that are not currently accessible from pydantic:
    expected_validation_error_codes.remove('timezone_offset')  # not currently exposed for configuration in pydantic

    test_failures = []
    documented_validation_error_codes = []
    # State while scanning: the current ``## `code``` section and the last
    # ``#> '...'`` printed value seen within it.
    error_code_section = None
    printed_error_code = None
    for line in error_text.splitlines():
        section_match = re.fullmatch(r'## `(.+)`', line)
        if section_match:
            # Entering a new section: verify the previous one printed its code.
            if error_code_section is not None and printed_error_code != error_code_section:
                test_failures.append(f'Error code {error_code_section!r} is not printed in its example')
            error_code_section = section_match.group(1)
            if error_code_section not in expected_validation_error_codes:
                test_failures.append(f'Documented error code {error_code_section!r} is not a member of ErrorType')
            documented_validation_error_codes.append(error_code_section)
            printed_error_code = None
            continue
        printed_match = re.search("#> '(.+)'", line)
        if printed_match:
            printed_error_code = printed_match.group(1)

    assert test_failures == []

    code_validation_error_codes = sorted(expected_validation_error_codes)
    assert code_validation_error_codes == documented_validation_error_codes, 'Error codes in code and docs do not match'
|
MockedDatetime
|
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 5846,
"end": 7496
}
|
class ____(_LeafWithoutNewlines):
    """
    A string. Sometimes it is important to know if the string belongs to a name
    or not.
    """
    type = 'name'
    __slots__ = ()

    def __repr__(self):
        # <ClassName: value@line,column>
        return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
                                   self.line, self.column)

    def is_definition(self, include_setitem=False):
        """
        Returns True if the name is being defined.
        """
        return self.get_definition(include_setitem=include_setitem) is not None

    def get_definition(self, import_name_always=False, include_setitem=False):
        """
        Returns None if there's no definition for a name.

        :param import_name_always: Specifies if an import name is always a
            definition. Normally foo in `from foo import bar` is not a
            definition.
        :param include_setitem: Also treat ``x[...] = ...`` targets as definitions.
        """
        node = self.parent
        type_ = node.type
        # Function/class names: the name defines the funcdef/classdef itself.
        if type_ in ('funcdef', 'classdef'):
            if self == node.name:
                return node
            return None

        # ``except ... as name``: the defining node is the enclosing try_stmt.
        if type_ == 'except_clause':
            if self.get_previous_sibling() == 'as':
                return node.parent  # The try_stmt.
            return None

        # Otherwise walk up the tree until a defining statement (or a suite
        # boundary, which means no definition) is reached.
        while node is not None:
            if node.type == 'suite':
                return None
            if node.type in _GET_DEFINITION_TYPES:
                if self in node.get_defined_names(include_setitem):
                    return node
                if import_name_always and node.type in _IMPORTS:
                    return node
                return None
            node = node.parent
        return None
|
Name
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/first_insight_span_sent.py
|
{
"start": 80,
"end": 295
}
|
class ____(analytics.Event):
    """Analytics event emitted when an organization sends its first insight span
    for a given module."""

    organization_id: int
    # ID of the acting user, if the event is attributable to one.
    user_id: int | None
    project_id: int
    # Insight module identifier the span belongs to — TODO confirm exact values against emitters.
    module: str
    # SDK/project platform; may be unknown.
    platform: str | None = None
analytics.register(FirstInsightSpanSentEvent)
|
FirstInsightSpanSentEvent
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/callback.py
|
{
"start": 2974,
"end": 3230
}
|
class ____(ImportPathCallbackDefProtocol, Protocol):
    """Protocol for callbacks that use the import path fetch method and have an executor attribute to specify the executor to run them on."""

    # Name of the executor to run the callback on; None presumably selects the
    # default executor — TODO confirm against implementers.
    executor: str | None
|
ImportPathExecutorCallbackDefProtocol
|
python
|
getsentry__sentry
|
src/sentry/integrations/api/endpoints/organization_integration_channels.py
|
{
"start": 6135,
"end": 7724
}
|
class ____(OrganizationIntegrationBaseEndpoint):
    """GET endpoint listing messaging channels for a Slack/Discord/MSTeams integration."""

    publish_status = {
        "GET": ApiPublishStatus.PRIVATE,
    }
    owner = ApiOwner.TELEMETRY_EXPERIENCE

    def get(
        self,
        request: Request,
        organization_context: RpcUserOrganizationContext,
        integration_id: int,
        **kwargs: Any,
    ) -> Response:
        """
        List all messaging channels for an integration.
        """
        integration = self.get_integration(organization_context.organization.id, integration_id)

        try:
            # Dispatch on provider; each helper talks to the provider's API.
            match integration.provider:
                case IntegrationProviderSlug.SLACK.value:
                    results = _slack_list_channels(integration_id=integration.id)
                case IntegrationProviderSlug.DISCORD.value:
                    # For Discord the integration's external_id is the guild id.
                    results = _discord_list_channels(guild_id=str(integration.external_id))
                case IntegrationProviderSlug.MSTEAMS.value:
                    results = _msteams_list_channels(
                        integration=integration,
                        team_id=str(integration.external_id),
                    )
                case _:
                    # Unsupported provider: empty result set plus a warning, not an error.
                    return self.respond(
                        {
                            "results": [],
                            "warning": f"Channel listing not supported for provider '{integration.provider}'.",
                        }
                    )
        except ApiError as e:
            # Provider API failures surface as a 400 with the provider's message.
            return self.respond({"detail": str(e)}, status=400)

        return self.respond({"results": results})
|
OrganizationIntegrationChannelsEndpoint
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 6900,
"end": 7029
}
|
class ____(generic.detail.SingleObjectMixin, generic.View):
    """Test view exercising SingleObjectMixin with a pre-set, unsaved object."""

    model = Book
    # Class-level object short-circuits get_object(); the Book is never saved.
    object = Book(name="dummy")
|
CustomSingleObjectView
|
python
|
milvus-io__pymilvus
|
pymilvus/milvus_client/index.py
|
{
"start": 2086,
"end": 2448
}
|
class ____(list):
    """List of indexs of a collection"""

    def __init__(self, *args, **kwargs):
        # Defer entirely to ``list``; this subclass only adds the helper below.
        super().__init__(*args, **kwargs)

    def add_index(self, field_name: str, index_type: str = "", index_name: str = "", **kwargs):
        """Append a new ``IndexParam`` built from the given field, type and name."""
        super().append(IndexParam(field_name, index_type, index_name, **kwargs))
|
IndexParams
|
python
|
numpy__numpy
|
numpy/distutils/_shell_utils.py
|
{
"start": 812,
"end": 2130
}
|
class ____:
    """
    The parsing behavior used by `subprocess.call("string")` on Windows, which
    matches the Microsoft C/C++ runtime.

    Note that this is _not_ the behavior of cmd.
    """
    @staticmethod
    def join(argv):
        """Join *argv* into one command string using MSVC quoting rules."""
        # note that list2cmdline is specific to the windows syntax
        return subprocess.list2cmdline(argv)

    @staticmethod
    def split(cmd):
        """Split *cmd* into an argv list via the Win32 ``CommandLineToArgvW`` API.

        Raises ``NotImplementedError`` on platforms without ``ctypes.windll``.
        """
        import ctypes  # guarded import for systems without ctypes
        try:
            ctypes.windll
        except AttributeError:
            raise NotImplementedError

        # Windows has special parsing rules for the executable (no quotes),
        # that we do not care about - insert a dummy element
        if not cmd:
            return []
        cmd = 'dummy ' + cmd
        CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
        CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
        CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))

        nargs = ctypes.c_int()
        lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
        args = [lpargs[i] for i in range(nargs.value)]
        # The buffer returned by CommandLineToArgvW must be released with
        # LocalFree, which returns NULL (falsy) on success.
        assert not ctypes.windll.kernel32.LocalFree(lpargs)

        # strip the element we inserted
        assert args[0] == "dummy"
        return args[1:]
|
WindowsParser
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
|
{
"start": 5487,
"end": 5576
}
|
class ____(IncrementalShopifyStream):
    """Incremental stream over Shopify smart collections."""

    # JSON field of the API response that holds the records.
    data_field = "smart_collections"
|
SmartCollections
|
python
|
Delgan__loguru
|
tests/test_pickling.py
|
{
"start": 1009,
"end": 10653
}
|
class ____(logging.Handler):
    """``logging.Handler`` test double that accumulates messages in memory and
    uses a picklable lock so the whole handler survives pickling."""

    def __init__(self, level):
        super().__init__(level)
        # Concatenation of every message emitted through this handler.
        self.written = ""

    def emit(self, record):
        """Append the record's message; no stream, no separator."""
        self.written = self.written + record.getMessage()

    def acquire(self):
        """No-op: locking is irrelevant for this in-memory handler."""

    def release(self):
        """No-op counterpart to :meth:`acquire`."""

    def createLock(self):  # noqa: N802
        # Replace the stdlib RLock with the test suite's picklable MockLock.
        self.lock = MockLock()
def format_function(record):
    """Loguru formatter returning a constant markup template; *record* is ignored."""
    template = "-> <red>{message}</red>"
    return template
def filter_function(record):
    """Keep only records whose message carries the literal '[PASS]' marker."""
    message = record["message"]
    return "[PASS]" in message
def patch_function(record):
    """Loguru patcher: inject ``extra['foo'] = 'bar'`` into every record."""
    record["extra"]["foo"] = "bar"
def rotation_function(message, file):
    """No-op rotation callback; exists only to test pickling of a custom rotation."""
    pass

def retention_function(files):
    """No-op retention callback; exists only to test pickling of a custom retention."""
    pass

def compression_function(path):
    """No-op compression callback; exists only to test pickling of a custom compression."""
    pass
def test_pickling_function_handler(capsys):
logger.add(print_, format="{level} - {function} - {message}")
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
out, err = capsys.readouterr()
assert out == "DEBUG - test_pickling_function_handler - A message\n"
assert err == ""
def test_pickling_coroutine_function_handler(capsys):
logger.add(async_print, format="{level} - {function} - {message}")
with copied_logger_though_pickle(logger) as dupe_logger:
async def async_debug():
dupe_logger.debug("A message")
await dupe_logger.complete()
asyncio.run(async_debug())
out, err = capsys.readouterr()
assert out == "DEBUG - async_debug - A message\n"
assert err == ""
@pytest.mark.parametrize("flushable", [True, False])
@pytest.mark.parametrize("stoppable", [True, False])
def test_pickling_stream_handler(flushable, stoppable):
stream = StreamHandler(flushable, stoppable)
logger.add(stream, format="{level} - {function} - {message}")
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
stream = next(iter(dupe_logger._core.handlers.values()))._sink._stream
assert stream.wrote == "DEBUG - test_pickling_stream_handler - A message\n"
assert stream.flushed == flushable
assert stream.stopped == stoppable
def test_pickling_standard_handler():
handler = StandardHandler(logging.NOTSET)
logger.add(handler, format="{level} - {function} - {message}")
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
handler = next(iter(dupe_logger._core.handlers.values()))._sink._handler
assert handler.written == "DEBUG - test_pickling_standard_handler - A message"
def test_pickling_standard_handler_root_logger_not_picklable(monkeypatch, capsys):
def reduce_protocol():
raise TypeError("Not picklable")
with monkeypatch.context() as context:
context.setattr(logging.getLogger(), "__reduce__", reduce_protocol, raising=False)
handler = StandardHandler(logging.NOTSET)
logger.add(handler, format="=> {message}", catch=False)
with copied_logger_though_pickle(logger) as dupe_logger:
logger.info("Ok")
dupe_logger.info("Ok")
out, err = capsys.readouterr()
assert out == ""
assert err == ""
assert handler.written == "=> Ok"
def test_pickling_file_handler(tmp_path):
file = tmp_path / "test.log"
logger.add(file, format="{level} - {function} - {message}", delay=True)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
assert file.read_text() == "DEBUG - test_pickling_file_handler - A message\n"
@pytest.mark.parametrize(
"rotation",
[
1000,
"daily",
datetime.timedelta(minutes=60),
datetime.time(hour=12, minute=00, second=00),
"200 MB",
"10:00",
"5 hours",
rotation_function,
],
)
def test_pickling_file_handler_rotation(tmp_path, rotation):
file = tmp_path / "test.log"
logger.add(file, format="{level} - {function} - {message}", delay=True, rotation=rotation)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
assert file.read_text() == "DEBUG - test_pickling_file_handler_rotation - A message\n"
@pytest.mark.parametrize(
"retention", [1000, datetime.timedelta(hours=13), "10 days", retention_function]
)
def test_pickling_file_handler_retention(tmp_path, retention):
file = tmp_path / "test.log"
logger.add(file, format="{level} - {function} - {message}", delay=True, retention=retention)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
assert file.read_text() == "DEBUG - test_pickling_file_handler_retention - A message\n"
@pytest.mark.parametrize("compression", ["zip", "gz", "tar", compression_function])
def test_pickling_file_handler_compression(tmp_path, compression):
file = tmp_path / "test.log"
logger.add(file, format="{level} - {function} - {message}", delay=True, compression=compression)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.debug("A message")
assert file.read_text() == "DEBUG - test_pickling_file_handler_compression - A message\n"
def test_pickling_no_handler(writer):
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.add(writer, format="{level} - {function} - {message}")
dupe_logger.debug("A message")
assert writer.read() == "DEBUG - test_pickling_no_handler - A message\n"
def test_pickling_handler_not_serializable():
    """A logger holding a lambda sink must fail to pickle with a clear error."""
    logger.add(lambda m: None)
    # The exception type/message for unpicklable lambdas differs across Python versions.
    with pytest.raises((pickle.PicklingError, AttributeError), match="Can't (pickle|get local)"):
        pickle.dumps(logger)
def test_pickling_filter_function(capsys):
logger.add(print_, format="{message}", filter=filter_function)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.info("Nope")
dupe_logger.info("[PASS] Yes")
out, err = capsys.readouterr()
assert out == "[PASS] Yes\n"
assert err == ""
@pytest.mark.parametrize("filter", ["", "tests"])
def test_pickling_filter_name(capsys, filter):
logger.add(print_, format="{message}", filter=filter)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.info("A message")
out, err = capsys.readouterr()
assert out == "A message\n"
assert err == ""
@pytest.mark.parametrize("colorize", [True, False])
def test_pickling_format_string(capsys, colorize):
logger.add(print_, format="-> <red>{message}</red>", colorize=colorize)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.info("The message")
out, err = capsys.readouterr()
assert out == parse("-> <red>The message</red>\n", strip=not colorize)
assert err == ""
@pytest.mark.parametrize("colorize", [True, False])
def test_pickling_format_function(capsys, colorize):
logger.add(print_, format=format_function, colorize=colorize)
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.info("The message")
out, err = capsys.readouterr()
assert out == parse("-> <red>The message</red>", strip=not colorize)
assert err == ""
def test_pickling_filter_function_not_serializable():
logger.add(print, filter=lambda r: True)
with pytest.raises((pickle.PicklingError, AttributeError), match="Can't (pickle|get local)"):
pickle.dumps(logger)
def test_pickling_format_function_not_serializable():
logger.add(print, format=lambda r: "{message}")
with pytest.raises((pickle.PicklingError, AttributeError), match="Can't (pickle|get local)"):
pickle.dumps(logger)
def test_pickling_bound_logger(writer):
bound_logger = logger.bind(foo="bar")
with copied_logger_though_pickle(bound_logger) as dupe_logger:
dupe_logger.add(writer, format="{extra[foo]}")
dupe_logger.info("Test")
assert writer.read() == "bar\n"
def test_pickling_patched_logger(writer):
patched_logger = logger.patch(patch_function)
with copied_logger_though_pickle(patched_logger) as dupe_logger:
dupe_logger.add(writer, format="{extra[foo]}")
dupe_logger.info("Test")
assert writer.read() == "bar\n"
def test_remove_after_pickling(capsys):
i = logger.add(print_, format="{message}")
logger.info("A")
with copied_logger_though_pickle(logger) as dupe_logger:
dupe_logger.remove(i)
dupe_logger.info("B")
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_pickling_logging_method(capsys):
    """A bound level method (``logger.critical``) must survive a pickle round-trip."""
    logger.add(print_, format="{level} - {function} - {message}")
    pickled = pickle.dumps(logger.critical)
    func = pickle.loads(pickled)
    func("A message")
    out, err = capsys.readouterr()
    assert out == "CRITICAL - test_pickling_logging_method - A message\n"
    assert err == ""
def test_pickling_log_method(capsys):
logger.add(print_, format="{level} - {function} - {message}")
pickled = pickle.dumps(logger.log)
func = pickle.loads(pickled)
func(19, "A message")
out, err = capsys.readouterr()
assert out == "Level 19 - test_pickling_log_method - A message\n"
assert err == ""
@pytest.mark.parametrize(
"method",
[
logger.add,
logger.remove,
logger.catch,
logger.opt,
logger.bind,
logger.patch,
logger.level,
logger.disable,
logger.enable,
logger.configure,
logger.parse,
logger.exception,
],
)
def test_pickling_no_error(method):
pickled = pickle.dumps(method)
unpickled = pickle.loads(pickled)
assert unpickled
|
StandardHandler
|
python
|
ray-project__ray
|
python/ray/air/execution/_internal/tracked_actor_task.py
|
{
"start": 113,
"end": 1261
}
|
class ____:
    """Actor task tracked by a Ray event manager.

    This container class is used to define callbacks to be invoked when
    the task resolves, errors, or times out.

    Note:
        Objects of this class are returned by the :class:`RayActorManager`.
        This class should not be instantiated manually.

    Args:
        tracked_actor: Tracked actor object this task is scheduled on.
        on_result: Callback to invoke when the task resolves.
        on_error: Callback to invoke when the task fails.

    Example:

        .. code-block:: python

            tracked_futures = actor_manager.schedule_actor_tasks(
                actor_manager.live_actors,
                "foo",
                on_result=lambda actor, result: print(result)
            )

    """

    def __init__(
        self,
        tracked_actor: TrackedActor,
        on_result: Optional[Callable[[TrackedActor, Any], None]] = None,
        on_error: Optional[Callable[[TrackedActor, Exception], None]] = None,
    ):
        # Actor the task runs on; passed back to both callbacks.
        self._tracked_actor = tracked_actor
        # Callbacks invoked by the actor manager on task resolution/failure.
        self._on_result = on_result
        self._on_error = on_error
|
TrackedActorTask
|
python
|
pytorch__pytorch
|
test/test_mps.py
|
{
"start": 360432,
"end": 362029
}
|
class ____(TestCaseMPS):
    """MPS tests exercising tensors whose element counts exceed 32-bit indexing."""

    @serialTest()
    def test_64bit_binops(self):
        # Skip on machines that cannot hold the >4-billion-element broadcast result.
        if torch.mps.recommended_max_memory() < 16_000_000_000:
            raise unittest.SkipTest("Needs at least 16Gb of RAM")
        a = torch.rand(1, 1024, 1024, dtype=torch.float16, device='mps')
        b = torch.rand(5000, 1, 1, dtype=torch.float16, device='mps')
        rc = (a + b).sin()
        # Compare only a slice against CPU to keep the check cheap.
        slice_idx = -2
        rc_slice = rc[slice_idx:]
        rc_slice_cpu = (a.cpu() + b.cpu()[slice_idx:]).sin()
        self.assertEqual(rc_slice, rc_slice_cpu)

    @serialTest()
    def test_64bit_index_select(self):
        if torch.mps.recommended_max_memory() < 16_000_000_000:
            raise unittest.SkipTest("Needs at least 16Gb of RAM")
        B, N = 11, 20000
        x = torch.empty(B, N, N, dtype=torch.float16, device='mps')
        # Fill each batch with its index so selection results are identifiable.
        for i in range(B):
            x[i] = 1.0 * i
        batch_idx = torch.tensor([9], device='mps')
        y = x[batch_idx]
        self.assertEqual(y[0, 1, 2].item(), 9.0)

        # Reclaim memory after running the tests
        del y
        del x
        gc.collect()
        torch.mps.empty_cache()

    @serialTest()
    def test_rand_2b_raises(self):
        int32_max = torch.iinfo(torch.int32).max
        with self.assertRaises(RuntimeError):
            # This used to crash with NDArray dimension length > INT_MAX
            x = torch.randint(0, 10, (int32_max + 1,), dtype=torch.int8, device='mps')
        # Exactly INT_MAX elements must still work.
        x = torch.randint(0, 10, (int32_max,), dtype=torch.int8, device='mps')
        self.assertEqual(x.numel(), int32_max)
        del x
TestLargeTensors
|
python
|
astropy__astropy
|
astropy/visualization/wcsaxes/frame.py
|
{
"start": 11324,
"end": 13052
}
|
class ____(BaseFrame):
    """
    An elliptical frame.
    """

    # "c": outer ellipse; "h"/"v": horizontal/vertical axes through the center.
    spine_names = "chv"
    _spine_auto_position_order = "chv"

    def update_spines(self):
        # Recompute spine data from the parent axes' current limits.
        xmin, xmax = self.parent_axes.get_xlim()
        ymin, ymax = self.parent_axes.get_ylim()

        xmid = 0.5 * (xmax + xmin)
        ymid = 0.5 * (ymax + ymin)

        dx = xmid - xmin
        dy = ymid - ymin

        theta = np.linspace(0.0, 2 * np.pi, 1000)
        self["c"].data = np.array(
            [xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]
        ).transpose()
        self["h"].data = np.array(
            [np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]
        ).transpose()
        self["v"].data = np.array(
            [np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]
        ).transpose()

        super().update_spines()

    def _update_patch_path(self):
        """Override path patch to include only the outer ellipse,
        not the major and minor axes in the middle.
        """
        self.update_spines()
        vertices = self["c"].data

        if self._path is None:
            self._path = Path(vertices)
        else:
            self._path.vertices = vertices

    def draw(self, renderer):
        """Override to draw only the outer ellipse,
        not the major and minor axes in the middle.

        FIXME: we may want to add a general method to give the user control
        over which spines are drawn.
        """
        axis = "c"
        pixel = self[axis]._get_pixel()
        line = Line2D(
            pixel[:, 0],
            pixel[:, 1],
            linewidth=self._linewidth,
            color=self._color,
            zorder=1000,
        )
        line.draw(renderer)
|
EllipticalFrame
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/marketing_platform/hooks/campaign_manager.py
|
{
"start": 1195,
"end": 11578
}
|
class ____(GoogleBaseHook):
"""Hook for Google Campaign Manager."""
_conn: Resource | None = None
def __init__(
self,
api_version: str = "v4",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def get_conn(self) -> Resource:
"""Retrieve connection to Campaign Manager."""
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
"dfareporting",
self.api_version,
http=http_authorized,
cache_discovery=False,
)
return self._conn
def delete_report(self, profile_id: str, report_id: str) -> Any:
"""
Delete a report by its ID.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
"""
response = (
self.get_conn()
.reports()
.delete(profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def insert_report(self, profile_id: str, report: dict[str, Any]) -> Any:
"""
Create a report.
:param profile_id: The DFA user profile ID.
:param report: The report resource to be inserted.
"""
response = (
self.get_conn()
.reports()
.insert(profileId=profile_id, body=report)
.execute(num_retries=self.num_retries)
)
return response
def list_reports(
self,
profile_id: str,
max_results: int | None = None,
scope: str | None = None,
sort_field: str | None = None,
sort_order: str | None = None,
) -> list[dict]:
"""
Retrieve list of reports.
:param profile_id: The DFA user profile ID.
:param max_results: Maximum number of results to return.
:param scope: The scope that defines which results are returned.
:param sort_field: The field by which to sort the list.
:param sort_order: Order of sorted results.
"""
reports: list[dict] = []
conn = self.get_conn()
request = conn.reports().list(
profileId=profile_id,
maxResults=max_results,
scope=scope,
sortField=sort_field,
sortOrder=sort_order,
)
while request is not None:
response = request.execute(num_retries=self.num_retries)
reports.extend(response.get("items", []))
request = conn.reports().list_next(previous_request=request, previous_response=response)
return reports
def patch_report(self, profile_id: str, report_id: str, update_mask: dict) -> Any:
"""
Update a report. This method supports patch semantics.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param update_mask: The relevant portions of a report resource,
according to the rules of patch semantics.
"""
response = (
self.get_conn()
.reports()
.patch(profileId=profile_id, reportId=report_id, body=update_mask)
.execute(num_retries=self.num_retries)
)
return response
def run_report(self, profile_id: str, report_id: str, synchronous: bool | None = None) -> Any:
"""
Run a report.
:param profile_id: The DFA profile ID.
:param report_id: The ID of the report.
:param synchronous: If set and true, tries to run the report synchronously.
"""
response = (
self.get_conn()
.reports()
.run(profileId=profile_id, reportId=report_id, synchronous=synchronous)
.execute(num_retries=self.num_retries)
)
return response
def update_report(self, profile_id: str, report_id: str) -> Any:
"""
Update a report.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
"""
response = (
self.get_conn()
.reports()
.update(profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def get_report(self, file_id: str, profile_id: str, report_id: str) -> Any:
"""
Retrieve a report file.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param file_id: The ID of the report file.
"""
response = (
self.get_conn()
.reports()
.files()
.get(fileId=file_id, profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def get_report_file(self, file_id: str, profile_id: str, report_id: str) -> http.HttpRequest:
"""
Retrieve a media part of report file.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param file_id: The ID of the report file.
:return: googleapiclient.http.HttpRequest
"""
request = (
self.get_conn()
.reports()
.files()
.get_media(fileId=file_id, profileId=profile_id, reportId=report_id)
)
return request
@staticmethod
def _conversions_batch_request(
conversions: list[dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
kind: str,
) -> dict[str, Any]:
return {
"kind": kind,
"conversions": conversions,
"encryptionInfo": {
"kind": "dfareporting#encryptionInfo",
"encryptionEntityType": encryption_entity_type,
"encryptionEntityId": encryption_entity_id,
"encryptionSource": encryption_source,
},
}
def conversions_batch_insert(
self,
profile_id: str,
conversions: list[dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_inserts: int = 0,
) -> Any:
"""
Insert conversions.
:param profile_id: User profile ID associated with this request.
:param conversions: Conversations to insert, should by type of Conversation:
https://developers.google.com/doubleclick-advertisers/rest/v4/conversions/batchinsert
:param encryption_entity_type: The encryption entity type. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_entity_id: The encryption entity ID. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_source: Describes whether the encrypted cookie was received from ad serving
(the %m macro) or from Data Transfer.
:param max_failed_inserts: The maximum number of conversions that failed to be inserted
"""
response = (
self.get_conn()
.conversions()
.batchinsert(
profileId=profile_id,
body=self._conversions_batch_request(
conversions=conversions,
encryption_entity_type=encryption_entity_type,
encryption_entity_id=encryption_entity_id,
encryption_source=encryption_source,
kind="dfareporting#conversionsBatchInsertRequest",
),
)
.execute(num_retries=self.num_retries)
)
if response.get("hasFailures", False):
errored_conversions = [stat["errors"] for stat in response["status"] if "errors" in stat]
if len(errored_conversions) > max_failed_inserts:
raise AirflowException(errored_conversions)
return response
def conversions_batch_update(
self,
profile_id: str,
conversions: list[dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_updates: int = 0,
) -> Any:
"""
Update existing conversions.
:param profile_id: User profile ID associated with this request.
:param conversions: Conversations to update, should by type of Conversation:
https://developers.google.com/doubleclick-advertisers/rest/v4/conversions/batchupdate
:param encryption_entity_type: The encryption entity type. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_entity_id: The encryption entity ID. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_source: Describes whether the encrypted cookie was received from ad serving
(the %m macro) or from Data Transfer.
:param max_failed_updates: The maximum number of conversions that failed to be updated
"""
response = (
self.get_conn()
.conversions()
.batchupdate(
profileId=profile_id,
body=self._conversions_batch_request(
conversions=conversions,
encryption_entity_type=encryption_entity_type,
encryption_entity_id=encryption_entity_id,
encryption_source=encryption_source,
kind="dfareporting#conversionsBatchUpdateRequest",
),
)
.execute(num_retries=self.num_retries)
)
if response.get("hasFailures", False):
errored_conversions = [stat["errors"] for stat in response["status"] if "errors" in stat]
if len(errored_conversions) > max_failed_updates:
raise AirflowException(errored_conversions)
return response
|
GoogleCampaignManagerHook
|
python
|
mlflow__mlflow
|
mlflow/entities/span.py
|
{
"start": 18480,
"end": 30525
}
|
class ____(Span):
"""
A "live" version of the :py:class:`Span <mlflow.entities.Span>` class.
The live spans are those being created and updated during the application runtime.
When users start a new span using the tracing APIs within their code, this live span
object is returned to get and set the span attributes, status, events, and etc.
"""
def __init__(
self,
otel_span: OTelSpan,
trace_id: str,
span_type: str = SpanType.UNKNOWN,
):
"""
The `otel_span` argument takes an instance of OpenTelemetry Span class, which is
indeed a subclass of ReadableSpan. Thanks to this, the getter methods of the Span
class can be reused without any modification.
Note that the constructor doesn't call the super().__init__ method, because the Span
initialization logic is a bit different from the immutable span.
"""
if not isinstance(otel_span, OTelReadableSpan):
raise MlflowException(
"The `otel_span` argument for the LiveSpan class must be an instance of "
f"trace.Span, but got {type(otel_span)}.",
INVALID_PARAMETER_VALUE,
)
self._span = otel_span
self._attributes = _SpanAttributesRegistry(otel_span)
self._attributes.set(SpanAttributeKey.REQUEST_ID, trace_id)
self._attributes.set(SpanAttributeKey.SPAN_TYPE, span_type)
# Track the original span name for deduplication purposes during span logging.
# Why: When traces contain multiple spans with identical names (e.g., multiple "LLM"
# or "query" spans), it's difficult for users to distinguish between them in the UI
# and logs. As spans are logged, we incrementally add numeric suffixes (_1, _2, etc.) to
# make each span uniquely identifiable within its trace
self._original_name = otel_span.name
def set_span_type(self, span_type: str):
"""Set the type of the span."""
self.set_attribute(SpanAttributeKey.SPAN_TYPE, span_type)
def set_inputs(self, inputs: Any):
"""Set the input values to the span."""
self.set_attribute(SpanAttributeKey.INPUTS, inputs)
def set_outputs(self, outputs: Any):
"""Set the output values to the span."""
self.set_attribute(SpanAttributeKey.OUTPUTS, outputs)
def set_attributes(self, attributes: dict[str, Any]):
"""
Set the attributes to the span. The attributes must be a dictionary of key-value pairs.
This method is additive, i.e. it will add new attributes to the existing ones. If an
attribute with the same key already exists, it will be overwritten.
"""
if not isinstance(attributes, dict):
_logger.warning(
f"Attributes must be a dictionary, but got {type(attributes)}. Skipping."
)
return
for key, value in attributes.items():
self.set_attribute(key, value)
def set_attribute(self, key: str, value: Any):
"""Set a single attribute to the span."""
self._attributes.set(key, value)
def set_status(self, status: SpanStatusCode | str):
"""
Set the status of the span.
Args:
status: The status of the span. This can be a
:py:class:`SpanStatus <mlflow.entities.SpanStatus>` object or a string representing
of the status code defined in
:py:class:`SpanStatusCode <mlflow.entities.SpanStatusCode>`
e.g. ``"OK"``, ``"ERROR"``.
"""
if isinstance(status, str):
status = SpanStatus(status)
# NB: We need to set the OpenTelemetry native StatusCode, because span's set_status
# method only accepts a StatusCode enum in their definition.
# https://github.com/open-telemetry/opentelemetry-python/blob/8ed71b15fb8fc9534529da8ce4a21e686248a8f3/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py#L949
# Working around this is possible, but requires some hack to handle automatic status
# propagation mechanism, so here we just use the native object that meets our
# current requirements at least. Nevertheless, declaring the new class extending
# the OpenTelemetry Status class so users code doesn't have to import the OTel's
# StatusCode object, which makes future migration easier.
self._span.set_status(status.to_otel_status())
def add_event(self, event: SpanEvent):
"""
Add an event to the span.
Args:
event: The event to add to the span. This should be a
:py:class:`SpanEvent <mlflow.entities.SpanEvent>` object.
"""
self._span.add_event(event.name, event.attributes, event.timestamp)
def record_exception(self, exception: str | Exception):
"""
Record an exception on the span, adding an exception event and setting span status to ERROR.
Args:
exception: The exception to record. Can be an Exception instance or a string
describing the exception.
"""
if isinstance(exception, Exception):
self.add_event(SpanEvent.from_exception(exception))
elif isinstance(exception, str):
self.add_event(SpanEvent.from_exception(Exception(exception)))
else:
raise MlflowException(
"The `exception` parameter must be an Exception instance or a string.",
INVALID_PARAMETER_VALUE,
)
self.set_status(
SpanStatus(
status_code=SpanStatusCode.ERROR,
description=f"{type(exception).__name__}: {exception}",
)
)
def end(
self,
outputs: Any | None = None,
attributes: dict[str, Any] | None = None,
status: SpanStatus | str | None = None,
end_time_ns: int | None = None,
):
"""
End the span.
outputs: Outputs to set on the span.
attributes: A dictionary of attributes to set on the span. If the span already has
attributes, the new attributes will be merged with the existing ones. If the same
key already exists, the new value will overwrite the old one.
status: The status of the span. This can be a
:py:class:`SpanStatus <mlflow.entities.SpanStatus>` object or a string
representing the status code defined in
:py:class:`SpanStatusCode <mlflow.entities.SpanStatusCode>`
e.g. ``"OK"``, ``"ERROR"``. The default status is OK.
end_time_ns: The end time of the span in nano seconds since the UNIX epoch.
If not provided, the current time will be used.
:meta private:
"""
try:
self.set_attributes(attributes or {})
if outputs is not None:
self.set_outputs(outputs)
if status is not None:
self.set_status(status)
# NB: In OpenTelemetry, status code remains UNSET if not explicitly set
# by the user. However, there is not way to set the status when using
# @mlflow.trace decorator. Therefore, we just automatically set the status
# to OK if it is not ERROR.
if self.status.status_code != SpanStatusCode.ERROR:
self.set_status(SpanStatus(SpanStatusCode.OK))
# Apply span processors
apply_span_processors(self)
self._span.end(end_time=end_time_ns)
except Exception as e:
_logger.warning(
f"Failed to end span {self.span_id}: {e}. "
"For full traceback, set logging level to debug.",
exc_info=_logger.isEnabledFor(logging.DEBUG),
)
def from_dict(cls, data: dict[str, Any]) -> "Span":
raise NotImplementedError("The `from_dict` method is not supported for the LiveSpan class.")
def to_immutable_span(self) -> "Span":
"""
Downcast the live span object to the immutable span.
:meta private:
"""
# All state of the live span is already persisted in the OpenTelemetry span object.
return Span(self._span)
@classmethod
def from_immutable_span(
cls,
span: Span,
parent_span_id: str | None = None,
trace_id: str | None = None,
experiment_id: str | None = None,
otel_trace_id: str | None = None,
) -> "LiveSpan":
"""
Create a new LiveSpan object from the given immutable span by
cloning the underlying OpenTelemetry span within current context.
This is particularly useful when we merging a remote trace into the current trace.
We cannot merge the remote trace directly, because it is already stored as an immutable
span, meaning that we cannot update metadata like trace ID, parent span ID,
which are necessary for merging the trace.
Args:
span: The immutable span object to clone.
parent_span_id: The parent span ID of the new span.
If it is None, the span will be created as a root span.
trace_id: The trace ID to be set on the new span. Specify this if you want to
create the new span with a particular trace ID.
experiment_id: The experiment ID to be set on the new span. If not specified, the
experiment ID will be set to the current experiment ID.
otel_trace_id: The OpenTelemetry trace ID of the new span in hex encoded format.
If not specified, the newly generated trace ID will be used.
Returns:
The new LiveSpan object with the same state as the original span.
:meta private:
"""
from mlflow.tracing.trace_manager import InMemoryTraceManager
trace_manager = InMemoryTraceManager.get_instance()
parent_span = trace_manager.get_span_from_id(trace_id, parent_span_id)
# Create a new span with the same name, parent, and start time
otel_span = mlflow.tracing.provider.start_detached_span(
name=span.name,
parent=parent_span._span if parent_span else None,
start_time_ns=span.start_time_ns,
experiment_id=experiment_id,
)
# The latter one from attributes is the newly generated trace ID by the span processor.
trace_id = trace_id or json.loads(otel_span.attributes.get(SpanAttributeKey.REQUEST_ID))
# Span processor registers a new span in the in-memory trace manager, but we want to pop it
clone_span = trace_manager._traces[trace_id].span_dict.pop(
encode_span_id(otel_span.context.span_id)
)
# Copy all the attributes, inputs, outputs, and events from the original span
clone_span.set_status(span.status)
clone_span.set_attributes(
{k: v for k, v in span.attributes.items() if k != SpanAttributeKey.REQUEST_ID}
)
if span.inputs:
clone_span.set_inputs(span.inputs)
if span.outputs:
clone_span.set_outputs(span.outputs)
for event in span.events:
clone_span.add_event(event)
# Update trace ID and span ID
context = span._span.get_span_context()
clone_span._span._context = SpanContext(
# Override otel_trace_id if provided, otherwise use the new trace ID
trace_id=decode_id(otel_trace_id) if otel_trace_id else otel_span.context.trace_id,
# Re-use same span ID as their ID space is local to the trace
span_id=context.span_id,
is_remote=context.is_remote,
# Override trace flag as if it is sampled within current context.
trace_flags=TraceFlags(TraceFlags.SAMPLED),
)
return clone_span
NO_OP_SPAN_TRACE_ID = "MLFLOW_NO_OP_SPAN_TRACE_ID"
|
LiveSpan
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/type_api.py
|
{
"start": 2409,
"end": 2618
}
|
class ____(Enum):
NO_VALUE_IN_LIST = 0
"""indicates we are trying to determine the type of an expression
against an empty list."""
_NO_VALUE_IN_LIST = _NoValueInList.NO_VALUE_IN_LIST
|
_NoValueInList
|
python
|
openai__openai-python
|
src/openai/resources/embeddings.py
|
{
"start": 11651,
"end": 11902
}
|
class ____:
def __init__(self, embeddings: AsyncEmbeddings) -> None:
self._embeddings = embeddings
self.create = _legacy_response.async_to_raw_response_wrapper(
embeddings.create,
)
|
AsyncEmbeddingsWithRawResponse
|
python
|
ansible__ansible
|
test/integration/targets/error_from_connection/connection_plugins/dummy.py
|
{
"start": 356,
"end": 918
}
|
class ____(ConnectionBase):
transport = 'dummy'
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
raise AnsibleError('an error with {{ some Jinja }}')
def _connect(self):
pass
def exec_command(self, cmd, in_data=None, sudoable=True):
pass
def put_file(self, in_path, out_path):
pass
def fetch_file(self, in_path, out_path):
pass
def close(self):
pass
|
Connection
|
python
|
skorch-dev__skorch
|
skorch/tests/test_probabilistic.py
|
{
"start": 4684,
"end": 18060
}
|
class ____:
"""Base class for all GP estimators.
This class defined all fixtures, most of which need to be implemented by the
respective subclass, as well as all the tests. The tests take care of using
attributes and properties that are true for all sorts of GPs (e.g. only
using parameters shared by all likelihoods).
"""
#####################
# testing functions #
#####################
@staticmethod
def assert_values_differ(x):
x = to_numpy(x)
assert len(np.unique(x)) > 1
##########################
# constants and fixtures #
##########################
@property
def n_samples(self):
# expects int
return NotImplementedError
@property
def n_targets(self):
# expects int
raise NotImplementedError
@property
def supports_predict_proba(self):
# expects bool
raise NotImplementedError
@property
def supports_return_std(self):
# expects bool
# This only checks if the argument is allowed by predict, not whether it
# actually implements a solution
raise NotImplementedError
@property
def supports_return_cov(self):
# expects bool
# This only checks if the argument is allowed by predict, not whether it
# actually implements a solution
raise NotImplementedError
@property
def settable_params(self):
# expects dict of parameters that can be set with set_params
raise NotImplementedError
@property
def scoring(self):
# the default scoring function of this estimator, must be sklearn
# compatible
raise NotImplementedError
@pytest.fixture
def gp_cls(self):
raise NotImplementedError
@pytest.fixture
def module_cls(self):
raise NotImplementedError
@pytest.fixture
def module_multioutput_cls(self):
# since multioutput is not currently being tested, not an abstract
# method
pass
@pytest.fixture
def data(self):
raise NotImplementedError
@pytest.fixture
def gp(self, gp_cls, module_cls, data):
raise NotImplementedError
@pytest.fixture
def gp_fit(self, gp, data):
X, y = data
return gp.fit(X, y)
@pytest.fixture
def gp_multioutput(self, gp_cls, module_multioutput_cls, data):
# should be fitted; since it's not currently being tested, not an
# abstract method
pass
@pytest.fixture
def pipe(self, gp):
return Pipeline([
('noop', None),
('gp', gp),
])
######################
# saving and loading #
######################
def test_pickling(self, gp_fit, data):
loaded = pickle.loads(pickle.dumps(gp_fit))
X, _ = data
y_pred_before = gp_fit.predict(X)
y_pred_after = loaded.predict(X)
assert np.allclose(y_pred_before, y_pred_after)
def test_deepcopy(self, gp_fit, data):
copied = copy.deepcopy(gp_fit)
X, _ = data
y_pred_before = gp_fit.predict(X)
y_pred_after = copied.predict(X)
assert np.allclose(y_pred_before, y_pred_after)
def test_clone(self, gp_fit, data):
clone(gp_fit) # does not raise
def test_save_load_params(self, gp_fit, tmpdir):
gp2 = clone(gp_fit).initialize()
# check first that parameters are not equal
for (_, p0), (_, p1) in zip(
gp_fit.get_all_learnable_params(), gp2.get_all_learnable_params(),
):
assert not (p0 == p1).all()
# save and load params to gp2
p_module = tmpdir.join('module.pt')
p_likelihood = tmpdir.join('likelihood.pt')
with open(str(p_module), 'wb') as fm, open(str(p_likelihood), 'wb') as fll:
gp_fit.save_params(f_params=fm, f_likelihood=fll)
with open(str(p_module), 'rb') as fm, open(str(p_likelihood), 'rb') as fll:
gp2.load_params(f_params=fm, f_likelihood=fll)
# now parameters should be equal
for (n0, p0), (n1, p1) in zip(
gp_fit.get_all_learnable_params(), gp2.get_all_learnable_params(),
):
assert n0 == n1
torch.testing.assert_close(p0, p1)
##############
# functional #
##############
def test_fit(self, gp_fit, recwarn):
# fitting does not raise anything and triggers no warning
assert not recwarn.list
def test_gp_learns(self, gp_fit):
history = gp_fit.history
assert history[0, 'train_loss'] > 0.5 * history[-1, 'train_loss']
def test_forward(self, gp_fit, data):
X = data[0]
y_forward = gp_fit.forward(X)
for yi in y_forward:
assert isinstance(yi, torch.distributions.distribution.Distribution)
total_shape = sum(get_batch_size(p) for p in y_forward)
assert total_shape == self.n_samples
def test_predict(self, gp_fit, data):
X = data[0]
y_pred = gp_fit.predict(X)
assert isinstance(y_pred, np.ndarray)
assert y_pred.shape == (self.n_samples,)
self.assert_values_differ(y_pred)
def test_predict_proba(self, gp_fit, data):
if not self.supports_predict_proba:
return
X = data[0]
y_proba = gp_fit.predict_proba(X)
assert isinstance(y_proba, np.ndarray)
assert y_proba.shape == (self.n_samples, self.n_targets)
self.assert_values_differ(y_proba)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
def test_fit_and_predict_with_cuda(self, gp, data):
gp.set_params(device='cuda')
X, y = data
gp.fit(X, y)
y_pred = gp.predict(X)
self.assert_values_differ(y_pred)
def test_in_sklearn_pipeline(self, pipe, data):
X, y = data
# none of this raises an error
pipe.fit(X, y)
pipe.predict(X)
pipe.set_params(**self.settable_params)
def test_grid_search_works(self, gp, data, recwarn):
X, y = data
params = {
'lr': [0.01, 0.02],
'max_epochs': [10, 20],
# this parameter does not exist but that's okay
'likelihood__some_parameter': [1, 2],
}
gp.set_params(verbose=0)
gs = GridSearchCV(gp, params, refit=True, cv=3, scoring=self.scoring)
gs.fit(X[:60], y[:60]) # for speed
# sklearn will catch fit failures and raise a warning, we should thus
# check that no warnings are generated
assert not recwarn.list
# Multioutput doesn't work because GPyTorch makes assumptions about the
# module output that are not compatible with multiple outputs. The tests are
# left in case this is fixed but they're not being executed.
@pytest.mark.skip
def test_fit_multioutput(self, gp_multioutput):
# doesn't raise
pass
@pytest.mark.skip
def test_multioutput_forward_iter(self, gp_multioutput, data):
X = data[0]
y_infer = next(gp_multioutput.forward_iter(X))
assert isinstance(y_infer, tuple)
assert len(y_infer) == 3
assert y_infer[0].shape[0] == min(len(X), gp_multioutput.batch_size)
@pytest.mark.skip
def test_multioutput_forward(self, gp_multioutput, data):
X = data[0]
y_infer = gp_multioutput.forward(X)
assert isinstance(y_infer, tuple)
assert len(y_infer) == 2
for arr in y_infer:
assert is_torch_data_type(arr)
for output in y_infer:
assert len(output) == self.n_samples
@pytest.mark.skip
def test_multioutput_predict(self, gp_multioutput, data):
X = data[0]
# does not raise
y_pred = gp_multioutput.predict(X)
# Expecting only 1 column containing predict class:
# (number of samples,)
assert y_pred.shape == (self.n_samples)
self.assert_values_differ(y_pred)
@pytest.mark.skip
def test_multioutput_predict_proba(self, gp_multioutput, data):
X = data[0]
# does not raise
y_proba = gp_multioutput.predict_proba(X)
self.assert_values_differ(y_proba)
# Expecting full output: (number of samples, number of output units)
assert y_proba.shape == (self.n_samples, self.n_targets)
# Probabilities, hence these limits
assert y_proba.min() >= 0
assert y_proba.max() <= 1
##################
# initialization #
##################
@pytest.mark.parametrize('kwargs,expected', [
({}, ""),
({
'likelihood__noise_prior': gpytorch.priors.NormalPrior(0, 1),
'likelihood__batch_shape': (345,),
}, ""),
({
'likelihood__noise_prior': gpytorch.priors.NormalPrior(0, 1),
'optimizer__momentum': 0.567,
}, ""),
])
def test_set_params_uninitialized_net_correct_message(
self, gp, kwargs, expected, capsys):
# When gp is uninitialized, there is nothing to alert the user to
gp.set_params(**kwargs)
msg = capsys.readouterr()[0].strip()
assert msg == expected
@pytest.mark.parametrize('kwargs,expected', [
({}, ""),
(
# this parameter does not exist but that's okay
{'likelihood__some_parameter': 2},
("Re-initializing module because the following "
"parameters were re-set: likelihood__some_parameter.\n"
"Re-initializing criterion.\n"
"Re-initializing optimizer.")
),
(
{
# this parameter does not exist but that's okay
'likelihood__some_parameter': 2,
'optimizer__momentum': 0.567,
},
("Re-initializing module because the following "
"parameters were re-set: likelihood__some_parameter.\n"
"Re-initializing criterion.\n"
"Re-initializing optimizer.")
),
])
def test_set_params_initialized_net_correct_message(
self, gp, kwargs, expected, capsys):
# When gp is initialized, if module or optimizer need to be
# re-initialized, alert the user to the fact what parameters
# were responsible for re-initialization. Note that when the
# module parameters but not optimizer parameters were changed,
# the optimizer is re-initialized but not because the
# optimizer parameters changed.
gp.initialize().set_params(**kwargs)
msg = capsys.readouterr()[0].strip()
assert msg == expected
def test_likelihood_already_initialized_does_not_reinit(self, gp, gp_cls):
# When the likelihood is already initialized and no params changed, it
# should just be set as is instead of creating a new instance. In
# theory, the same should apply to modules but in all the examples here,
# modules require params, so we cannot test it.
gp_init = gp.initialize()
# create a new GP instance using this somewhat convoluted approach
# because we don't know what arguments are required to initialize from
# scratch
params = gp_init.get_params()
# set likelihood and likelihood to be initialized already
params['likelihood'] = gp_init.likelihood_
gp = gp_cls(**params).initialize()
assert gp.likelihood_ is gp_init.likelihood_
##########################
# probabalistic specific #
##########################
@pytest.mark.parametrize("n_samples", [1, 2, 10])
def test_sampling(self, gp, data, n_samples):
X, _ = data
samples = gp.initialize().sample(X, n_samples=n_samples)
assert samples.shape == (n_samples, len(X))
# check that values are not all the same -- this can happen when
# posterior variances are skipped via a setting
self.assert_values_differ(samples)
def test_confidence_region(self, gp_fit, data):
X, _ = data
# lower bound should always be lower than upper bound
lower_1, upper_1 = gp_fit.confidence_region(X, sigmas=1)
assert (lower_1 < upper_1).all()
lower_2, upper_2 = gp_fit.confidence_region(X, sigmas=2)
assert (lower_2 < upper_2).all()
# higher sigmas -> wider regions
assert (lower_2 < lower_1).all()
assert (upper_2 > upper_1).all()
def test_predict_return_std(self, gp_fit, data):
if not self.supports_return_std:
return
X, _ = data
y_proba, y_std = gp_fit.predict(X, return_std=True)
# not a lot we know for sure about the values of the standard deviation,
# hence only test shape and that they're positive
assert y_proba.shape == y_std.shape
assert (y_std > 0).all()
self.assert_values_differ(y_std)
def test_predict_return_cov(self, gp_fit, data):
if not self.supports_return_cov:
return
X, _ = data
msg = ("The 'return_cov' argument is not supported. Please try: "
"'posterior = next(gpr.forward_iter(X)); posterior.covariance_matrix'.")
with pytest.raises(NotImplementedError, match=re.escape(msg)):
gp_fit.predict(X, return_cov=True)
|
BaseProbabilisticTests
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/workbook/test_custom_sheet.py
|
{
"start": 363,
"end": 404
}
|
class ____(Worksheet):
pass
|
MyWorksheet
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_pretty.py
|
{
"start": 4853,
"end": 5002
}
|
class ____(dict):
def __repr__(self):
return "hi"
def test_dict_with_custom_repr():
assert pretty.pretty(ReprDict()) == "hi"
|
ReprDict
|
python
|
google__jax
|
tests/debugging_primitives_test.py
|
{
"start": 28133,
"end": 35738
}
|
class ____(jtu.JaxTestCase):
def _create_devices(self, shape):
num_devices = np.prod(shape)
devices = [DummyDevice("CPU", i) for i in range(num_devices)]
return np.array(devices).reshape(shape)
def test_trivial_sharding(self):
mesh = jax.sharding.Mesh(self._create_devices(1), ['x'])
pspec = jax.sharding.PartitionSpec('x')
sd = jax.sharding.NamedSharding(mesh, pspec)
shape = (5,)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
self.assertEqual(output(), _format_multiline("""
┌───────┐
│ CPU 0 │
└───────┘
"""))
def test_trivial_sharding_with_scale(self):
mesh = jax.sharding.Mesh(self._create_devices(1), ['x'])
pspec = jax.sharding.PartitionSpec('x')
sd = jax.sharding.NamedSharding(mesh, pspec)
shape = (5,)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd, scale=8.)
self.assertEqual(output(), _format_multiline("""
┌──────────────────────────────────────┐
│ CPU 0 │
└──────────────────────────────────────┘
"""))
def test_full_sharding(self):
mesh = jax.sharding.Mesh(self._create_devices((8, 4)), ['x', 'y'])
pspec = jax.sharding.PartitionSpec('x', 'y')
sd = jax.sharding.NamedSharding(mesh, pspec)
shape = (8, 8)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
expected = _format_multiline("""
┌───────┬───────┬───────┬───────┐
│ CPU 0 │ CPU 1 │ CPU 2 │ CPU 3 │
├───────┼───────┼───────┼───────┤
│ CPU 4 │ CPU 5 │ CPU 6 │ CPU 7 │
├───────┼───────┼───────┼───────┤
│ CPU 8 │ CPU 9 │CPU 10 │CPU 11 │
├───────┼───────┼───────┼───────┤
│CPU 12 │CPU 13 │CPU 14 │CPU 15 │
├───────┼───────┼───────┼───────┤
│CPU 16 │CPU 17 │CPU 18 │CPU 19 │
├───────┼───────┼───────┼───────┤
│CPU 20 │CPU 21 │CPU 22 │CPU 23 │
├───────┼───────┼───────┼───────┤
│CPU 24 │CPU 25 │CPU 26 │CPU 27 │
├───────┼───────┼───────┼───────┤
│CPU 28 │CPU 29 │CPU 30 │CPU 31 │
└───────┴───────┴───────┴───────┘
""")
self.assertEqual(output(), expected)
def test_sharding_with_replication(self):
shape = (8, 8)
mesh = jax.sharding.Mesh(self._create_devices((8, 4)), ['x', 'y'])
pspec = jax.sharding.PartitionSpec('x', None)
sd = jax.sharding.NamedSharding(mesh, pspec)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
expected = _format_multiline("""
┌───────────────────────┐
│ CPU 0,1,2,3 │
├───────────────────────┤
│ CPU 4,5,6,7 │
├───────────────────────┤
│ CPU 8,9,10,11 │
├───────────────────────┤
│ CPU 12,13,14,15 │
├───────────────────────┤
│ CPU 16,17,18,19 │
├───────────────────────┤
│ CPU 20,21,22,23 │
├───────────────────────┤
│ CPU 24,25,26,27 │
├───────────────────────┤
│ CPU 28,29,30,31 │
└───────────────────────┘
""")
self.assertEqual(output(), expected)
mesh = jax.sharding.Mesh(self._create_devices((4, 2)), ['x', 'y'])
pspec = jax.sharding.PartitionSpec(None, 'y')
sd = jax.sharding.NamedSharding(mesh, pspec)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
expected = _format_multiline("""
┌───────────┬───────────┐
│ │ │
│ │ │
│ │ │
│ │ │
│CPU 0,2,4,6│CPU 1,3,5,7│
│ │ │
│ │ │
│ │ │
│ │ │
└───────────┴───────────┘
""")
self.assertEqual(output(), expected)
def test_visualize_wide_array(self):
shape = (128, 10000)
mesh = jax.sharding.Mesh(self._create_devices((8, 4)), ['x', 'y'])
pspec = jax.sharding.PartitionSpec('x', None)
sd = jax.sharding.NamedSharding(mesh, pspec)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
expected = _format_multiline("""
┌──────────────────────────────────────────────────────────────────────────────┐
│ CPU 0,1,2,3 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 4,5,6,7 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 8,9,10,11 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 12,13,14,15 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 16,17,18,19 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 20,21,22,23 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 24,25,26,27 │
├──────────────────────────────────────────────────────────────────────────────┤
│ CPU 28,29,30,31 │
└──────────────────────────────────────────────────────────────────────────────┘
""")
self.assertEqual(output(), expected)
@jtu.ignore_warning(category=DeprecationWarning)
def test_visualize_pmap_sharding(self):
ss = pxla.ShardingSpec(
sharding=(pxla.Unstacked(8),),
mesh_mapping=(pxla.ShardedAxis(0),))
sd = jax.sharding.PmapSharding(self._create_devices(8), ss)
shape = (8,)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
expected = _format_multiline("""
┌───────┬───────┬───────┬───────┬───────┬───────┬───────┬───────┐
│ CPU 0 │ CPU 1 │ CPU 2 │ CPU 3 │ CPU 4 │ CPU 5 │ CPU 6 │ CPU 7 │
└───────┴───────┴───────┴───────┴───────┴───────┴───────┴───────┘
""")
self.assertEqual(output(), expected)
ss = pxla.ShardingSpec(
sharding=(pxla.Unstacked(8), pxla.NoSharding()),
mesh_mapping=(pxla.ShardedAxis(0),))
sd = jax.sharding.PmapSharding(self._create_devices(8), ss)
shape = (8, 2)
with jtu.capture_stdout() as output:
debugging.visualize_sharding(shape, sd)
expected = _format_multiline("""
┌───────┐
│ CPU 0 │
├───────┤
│ CPU 1 │
├───────┤
│ CPU 2 │
├───────┤
│ CPU 3 │
├───────┤
│ CPU 4 │
├───────┤
│ CPU 5 │
├───────┤
│ CPU 6 │
├───────┤
│ CPU 7 │
└───────┘
""")
self.assertEqual(output(), expected)
def test_visualize_sharding_shard_map(self):
mesh = jtu.create_mesh((2,), 'x')
def f():
a = jnp.zeros(1000)
debugging.visualize_array_sharding(a)
return a
with jtu.capture_stdout() as output:
f() # doesn't crash
with jtu.capture_stdout() as output:
jax.jit(f, out_shardings=jax.NamedSharding(mesh, P('x')))() # doesn't crash
with jtu.capture_stdout() as output:
jax.shard_map(f, mesh=mesh, in_specs=P(None), out_specs=P("x"))() # doesn't crash
with jtu.capture_stdout() as output:
jax.shard_map(f, mesh=mesh, in_specs=P(None), out_specs=P("x"),
check_vma=False)() # doesn't crash
|
VisualizeShardingTest
|
python
|
huggingface__transformers
|
src/transformers/models/flaubert/modeling_flaubert.py
|
{
"start": 2906,
"end": 6623
}
|
class ____(nn.Module):
def __init__(self, n_heads, dim, config, layer_idx: int = 0):
super().__init__()
self.layer_id = layer_idx
self.dim = dim
self.n_heads = n_heads
self.head_dim = dim // n_heads
self.dropout = config.attention_dropout
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
self.out_lin = nn.Linear(dim, dim)
def forward(
self,
input,
mask,
kv=None,
cache=None,
output_attentions=False,
cache_position=None,
):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = input.size()
is_cross_attention = kv is not None
mask_reshape = (bs, 1, qlen, -1) if mask.dim() == 3 else (bs, 1, 1, -1)
q = self.q_lin(input).view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2)
if cache is not None:
if isinstance(cache, EncoderDecoderCache):
is_updated = cache.is_updated.get(self.layer_id)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = cache.cross_attention_cache
else:
curr_past_key_values = cache.self_attention_cache
else:
curr_past_key_values = cache
current_states = kv if is_cross_attention else input
if is_cross_attention and cache is not None and is_updated:
# reuse k,v, cross_attentions
k = curr_past_key_values.key_cache[self.layer_id]
v = curr_past_key_values.value_cache[self.layer_id]
else:
k = self.k_lin(current_states)
v = self.v_lin(current_states)
k = k.view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2)
v = v.view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2)
if cache is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
k, v = curr_past_key_values.update(k, v, self.layer_id, {"cache_position": cache_position})
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention:
cache.is_updated[self.layer_id] = True
q = q / math.sqrt(self.head_dim) # (bs, n_heads, qlen, head_dim)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
scores.masked_fill_(mask, torch.finfo(scores.dtype).min) # (bs, n_heads, qlen, klen)
weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
context = torch.matmul(weights, v) # (bs, n_heads, qlen, head_dim)
context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.head_dim)
outputs = (self.out_lin(context),)
if output_attentions:
outputs = outputs + (weights,)
return outputs
# Copied from transformers.models.xlm.modeling_xlm.TransformerFFN
|
MultiHeadAttention
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyflakes/F821_19.py
|
{
"start": 170,
"end": 427
}
|
class ____:
# OK: Allow list comprehensions in annotations (i.e., treat `qux` as a valid
# load in the scope of the annotation).
baz: Annotated[
str,
[qux for qux in foo],
]
# Error: `y` is not defined.
x: (y := 1)
print(y)
|
Bar
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 118040,
"end": 118191
}
|
class ____:
xlStandardSummary = 1 # from enum XlSummaryReportType
xlSummaryPivotTable = -4148 # from enum XlSummaryReportType
|
SummaryReportType
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/models.py
|
{
"start": 2184,
"end": 11067
}
|
class ____(models.Model):
"""
An Application instance represents a Client on the Authorization server.
Usually an Application is created manually by client's developers after
logging in on an Authorization Server.
Fields:
* :attr:`client_id` The client identifier issued to the client during the
registration process as described in :rfc:`2.2`
* :attr:`user` ref to a Django user
* :attr:`redirect_uris` The list of allowed redirect uri. The string
consists of valid URLs separated by space
* :attr:`post_logout_redirect_uris` The list of allowed redirect uris after
an RP initiated logout. The string
consists of valid URLs separated by space
* :attr:`client_type` Client type as described in :rfc:`2.1`
* :attr:`authorization_grant_type` Authorization flows available to the
Application
* :attr:`client_secret` Confidential secret issued to the client during
the registration process as described in :rfc:`2.2`
* :attr:`name` Friendly name for the Application
"""
CLIENT_CONFIDENTIAL = "confidential"
CLIENT_PUBLIC = "public"
CLIENT_TYPES = (
(CLIENT_CONFIDENTIAL, _("Confidential")),
(CLIENT_PUBLIC, _("Public")),
)
GRANT_AUTHORIZATION_CODE = "authorization-code"
GRANT_DEVICE_CODE = "urn:ietf:params:oauth:grant-type:device_code"
GRANT_IMPLICIT = "implicit"
GRANT_PASSWORD = "password"
GRANT_CLIENT_CREDENTIALS = "client-credentials"
GRANT_OPENID_HYBRID = "openid-hybrid"
GRANT_TYPES = (
(GRANT_AUTHORIZATION_CODE, _("Authorization code")),
(GRANT_DEVICE_CODE, _("Device Code")),
(GRANT_IMPLICIT, _("Implicit")),
(GRANT_PASSWORD, _("Resource owner password-based")),
(GRANT_CLIENT_CREDENTIALS, _("Client credentials")),
(GRANT_OPENID_HYBRID, _("OpenID connect hybrid")),
)
NO_ALGORITHM = ""
RS256_ALGORITHM = "RS256"
HS256_ALGORITHM = "HS256"
ALGORITHM_TYPES = (
(NO_ALGORITHM, _("No OIDC support")),
(RS256_ALGORITHM, _("RSA with SHA-2 256")),
(HS256_ALGORITHM, _("HMAC with SHA-2 256")),
)
id = models.BigAutoField(primary_key=True)
client_id = models.CharField(max_length=100, unique=True, default=generate_client_id, db_index=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="%(app_label)s_%(class)s",
null=True,
blank=True,
on_delete=models.CASCADE,
)
redirect_uris = models.TextField(
blank=True,
help_text=_("Allowed URIs list, space separated"),
)
post_logout_redirect_uris = models.TextField(
blank=True,
help_text=_("Allowed Post Logout URIs list, space separated"),
default="",
)
client_type = models.CharField(max_length=32, choices=CLIENT_TYPES)
authorization_grant_type = models.CharField(max_length=44, choices=GRANT_TYPES)
client_secret = ClientSecretField(
max_length=255,
blank=True,
default=generate_client_secret,
db_index=True,
help_text=_("Hashed on Save. Copy it now if this is a new secret."),
)
hash_client_secret = models.BooleanField(default=True)
name = models.CharField(max_length=255, blank=True)
skip_authorization = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
algorithm = models.CharField(max_length=5, choices=ALGORITHM_TYPES, default=NO_ALGORITHM, blank=True)
allowed_origins = models.TextField(
blank=True,
help_text=_("Allowed origins list to enable CORS, space separated"),
default="",
)
class Meta:
abstract = True
def __str__(self):
return self.name or self.client_id
@property
def default_redirect_uri(self):
"""
Returns the default redirect_uri, *if* only one is registered.
"""
if self.redirect_uris:
uris = self.redirect_uris.split()
if len(uris) == 1:
return self.redirect_uris.split().pop(0)
raise errors.MissingRedirectURIError()
assert False, (
"If you are using implicit, authorization_code "
"or all-in-one grant_type, you must define "
"redirect_uris field in your Application model"
)
def redirect_uri_allowed(self, uri):
"""
Checks if given url is one of the items in :attr:`redirect_uris` string
:param uri: Url to check
"""
return redirect_to_uri_allowed(uri, self.redirect_uris.split())
def post_logout_redirect_uri_allowed(self, uri):
"""
Checks if given URI is one of the items in :attr:`post_logout_redirect_uris` string
:param uri: URI to check
"""
return redirect_to_uri_allowed(uri, self.post_logout_redirect_uris.split())
def origin_allowed(self, origin):
"""
Checks if given origin is one of the items in :attr:`allowed_origins` string
:param origin: Origin to check
"""
return self.allowed_origins and is_origin_allowed(origin, self.allowed_origins.split())
def clean(self):
from django.core.exceptions import ValidationError
grant_types = (
AbstractApplication.GRANT_AUTHORIZATION_CODE,
AbstractApplication.GRANT_IMPLICIT,
AbstractApplication.GRANT_OPENID_HYBRID,
)
hs_forbidden_grant_types = (
AbstractApplication.GRANT_IMPLICIT,
AbstractApplication.GRANT_OPENID_HYBRID,
)
redirect_uris = self.redirect_uris.strip().split()
allowed_schemes = set(s.lower() for s in self.get_allowed_schemes())
if redirect_uris:
validator = AllowedURIValidator(
allowed_schemes,
name="redirect uri",
allow_path=True,
allow_query=True,
allow_hostname_wildcard=oauth2_settings.ALLOW_URI_WILDCARDS,
)
for uri in redirect_uris:
validator(uri)
elif self.authorization_grant_type in grant_types:
raise ValidationError(
_("redirect_uris cannot be empty with grant_type {grant_type}").format(
grant_type=self.authorization_grant_type
)
)
allowed_origins = self.allowed_origins.strip().split()
if allowed_origins:
# oauthlib allows only https scheme for CORS
validator = AllowedURIValidator(
oauth2_settings.ALLOWED_SCHEMES,
"allowed origin",
allow_hostname_wildcard=oauth2_settings.ALLOW_URI_WILDCARDS,
)
for uri in allowed_origins:
validator(uri)
if self.algorithm == AbstractApplication.RS256_ALGORITHM:
if not oauth2_settings.OIDC_RSA_PRIVATE_KEY:
raise ValidationError(_("You must set OIDC_RSA_PRIVATE_KEY to use RSA algorithm"))
if self.algorithm == AbstractApplication.HS256_ALGORITHM:
if any(
(
self.authorization_grant_type in hs_forbidden_grant_types,
self.client_type == Application.CLIENT_PUBLIC,
)
):
raise ValidationError(_("You cannot use HS256 with public grants or clients"))
def get_absolute_url(self):
return reverse("oauth2_provider:detail", args=[str(self.pk)])
def get_allowed_schemes(self):
"""
Returns the list of redirect schemes allowed by the Application.
By default, returns `ALLOWED_REDIRECT_URI_SCHEMES`.
"""
return oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES
def allows_grant_type(self, *grant_types):
return self.authorization_grant_type in grant_types
def is_usable(self, request):
"""
Determines whether the application can be used.
:param request: The oauthlib.common.Request being processed.
"""
return True
@property
def jwk_key(self):
if self.algorithm == AbstractApplication.RS256_ALGORITHM:
if not oauth2_settings.OIDC_RSA_PRIVATE_KEY:
raise ImproperlyConfigured("You must set OIDC_RSA_PRIVATE_KEY to use RSA algorithm")
return jwk_from_pem(oauth2_settings.OIDC_RSA_PRIVATE_KEY)
elif self.algorithm == AbstractApplication.HS256_ALGORITHM:
return jwk.JWK(kty="oct", k=base64url_encode(self.client_secret))
raise ImproperlyConfigured("This application does not support signed tokens")
|
AbstractApplication
|
python
|
Textualize__textual
|
docs/examples/how-to/layout04.py
|
{
"start": 402,
"end": 580
}
|
class ____(Screen):
def compose(self) -> ComposeResult:
yield Header(id="Header")
yield Footer(id="Footer")
yield HorizontalScroll() # (1)!
|
TweetScreen
|
python
|
python__mypy
|
mypy/nodes.py
|
{
"start": 99430,
"end": 100165
}
|
class ____(Expression):
"""NewType expression NewType(...)."""
__slots__ = ("name", "old_type", "info")
__match_args__ = ("name", "old_type", "info")
name: str
# The base type (the second argument to NewType)
old_type: mypy.types.Type | None
# The synthesized class representing the new type (inherits old_type)
info: TypeInfo | None
def __init__(
self, name: str, old_type: mypy.types.Type | None, line: int, column: int
) -> None:
super().__init__(line=line, column=column)
self.name = name
self.old_type = old_type
self.info = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_newtype_expr(self)
|
NewTypeExpr
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/type_inference.py
|
{
"start": 16435,
"end": 18706
}
|
class ____(cfg.GraphVisitor):
"""CFG visitor that propagates type information across statements."""
def __init__(self, graph, resolver, namespace, scope, closure_types):
"""Creates a new analyzer.
Args:
graph: cfg.Graph
resolver: Resolver
namespace: Dict[str, Any]
scope: activity.Scope
closure_types: Dict[QN, Set]
"""
super(Analyzer, self).__init__(graph)
self.resolver = resolver
self.namespace = namespace
self.scope = scope
self.closure_types = closure_types
context_types = {
n: t for n, t in closure_types.items() if n not in scope.bound
}
if context_types:
self.context_types = _TypeMap()
self.context_types.types = context_types
else:
self.context_types = None
def init_state(self, _):
return _TypeMap()
def _update_closure_types(self, ast_node, types):
existing_types = anno.Static.CLOSURE_TYPES.of(ast_node, None)
if existing_types is None:
existing_types = {}
anno.Static.CLOSURE_TYPES.add_to(ast_node, existing_types)
for k, v in types.types.items():
if k in existing_types:
existing_types[k].update(v)
else:
existing_types[k] = set(v)
def visit_node(self, node):
prev_types_out = self.out[node]
types_in = _TypeMap()
for n in node.prev:
types_in |= self.out[n]
if (self.context_types is not None) and (node is self.graph.entry):
types_in |= self.context_types
types_out = _TypeMap(types_in)
ast_node = node.ast_node
inferrer = StmtInferrer(self.resolver, self.scope, self.namespace,
self.closure_types, types_in)
inferrer.visit(ast_node)
types_out.types.update(inferrer.new_symbols)
reaching_fndefs = anno.Static.DEFINED_FNS_IN.of(ast_node)
node_scope = anno.Static.SCOPE.of(ast_node, None)
if node_scope is not None:
# TODO(mdan): Check that it's actually safe to skip nodes without scope.
reads = {str(qn) for qn in node_scope.read}
for def_node in reaching_fndefs:
if def_node.name in reads:
self._update_closure_types(def_node, types_out)
self.in_[node] = types_in
self.out[node] = types_out
return prev_types_out != types_out
|
Analyzer
|
python
|
tiangolo__fastapi
|
tests/test_default_response_class.py
|
{
"start": 358,
"end": 5365
}
|
class ____(JSONResponse):
media_type = "application/x-override"
app = FastAPI(default_response_class=ORJSONResponse)
router_a = APIRouter()
router_a_a = APIRouter()
router_a_b_override = APIRouter() # Overrides default class
router_b_override = APIRouter() # Overrides default class
router_b_a = APIRouter()
router_b_a_c_override = APIRouter() # Overrides default class again
@app.get("/")
def get_root():
return {"msg": "Hello World"}
@app.get("/override", response_class=PlainTextResponse)
def get_path_override():
return "Hello World"
@router_a.get("/")
def get_a():
return {"msg": "Hello A"}
@router_a.get("/override", response_class=PlainTextResponse)
def get_a_path_override():
return "Hello A"
@router_a_a.get("/")
def get_a_a():
return {"msg": "Hello A A"}
@router_a_a.get("/override", response_class=PlainTextResponse)
def get_a_a_path_override():
return "Hello A A"
@router_a_b_override.get("/")
def get_a_b():
return "Hello A B"
@router_a_b_override.get("/override", response_class=HTMLResponse)
def get_a_b_path_override():
return "Hello A B"
@router_b_override.get("/")
def get_b():
return "Hello B"
@router_b_override.get("/override", response_class=HTMLResponse)
def get_b_path_override():
return "Hello B"
@router_b_a.get("/")
def get_b_a():
return "Hello B A"
@router_b_a.get("/override", response_class=HTMLResponse)
def get_b_a_path_override():
return "Hello B A"
@router_b_a_c_override.get("/")
def get_b_a_c():
return "Hello B A C"
@router_b_a_c_override.get("/override", response_class=OverrideResponse)
def get_b_a_c_path_override():
return {"msg": "Hello B A C"}
router_b_a.include_router(
router_b_a_c_override, prefix="/c", default_response_class=HTMLResponse
)
router_b_override.include_router(router_b_a, prefix="/a")
router_a.include_router(router_a_a, prefix="/a")
router_a.include_router(
router_a_b_override, prefix="/b", default_response_class=PlainTextResponse
)
app.include_router(router_a, prefix="/a")
app.include_router(
router_b_override, prefix="/b", default_response_class=PlainTextResponse
)
client = TestClient(app)
orjson_type = "application/x-orjson"
text_type = "text/plain; charset=utf-8"
html_type = "text/html; charset=utf-8"
override_type = "application/x-override"
def test_app():
with client:
response = client.get("/")
assert response.json() == {"msg": "Hello World"}
assert response.headers["content-type"] == orjson_type
def test_app_override():
with client:
response = client.get("/override")
assert response.content == b"Hello World"
assert response.headers["content-type"] == text_type
def test_router_a():
with client:
response = client.get("/a")
assert response.json() == {"msg": "Hello A"}
assert response.headers["content-type"] == orjson_type
def test_router_a_override():
with client:
response = client.get("/a/override")
assert response.content == b"Hello A"
assert response.headers["content-type"] == text_type
def test_router_a_a():
with client:
response = client.get("/a/a")
assert response.json() == {"msg": "Hello A A"}
assert response.headers["content-type"] == orjson_type
def test_router_a_a_override():
with client:
response = client.get("/a/a/override")
assert response.content == b"Hello A A"
assert response.headers["content-type"] == text_type
def test_router_a_b():
with client:
response = client.get("/a/b")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == text_type
def test_router_a_b_override():
with client:
response = client.get("/a/b/override")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == html_type
def test_router_b():
with client:
response = client.get("/b")
assert response.content == b"Hello B"
assert response.headers["content-type"] == text_type
def test_router_b_override():
with client:
response = client.get("/b/override")
assert response.content == b"Hello B"
assert response.headers["content-type"] == html_type
def test_router_b_a():
with client:
response = client.get("/b/a")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == text_type
def test_router_b_a_override():
with client:
response = client.get("/b/a/override")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == html_type
def test_router_b_a_c():
with client:
response = client.get("/b/a/c")
assert response.content == b"Hello B A C"
assert response.headers["content-type"] == html_type
def test_router_b_a_c_override():
with client:
response = client.get("/b/a/c/override")
assert response.json() == {"msg": "Hello B A C"}
assert response.headers["content-type"] == override_type
|
OverrideResponse
|
python
|
ray-project__ray
|
python/ray/autoscaler/v2/tests/test_sdk.py
|
{
"start": 2919,
"end": 3938
}
|
class ____:
node_id: str
node_status: NodeStatus
idle_time_check_cb: Optional[Callable] = None
labels: Optional[dict] = None
def assert_node_states(
state: ClusterResourceState, expected_nodes: List[ExpectedNodeState]
):
"""
Assert a GetClusterResourceStateReply has node states that
matches with the expected nodes.
"""
assert len(state.node_states) == len(expected_nodes)
# Sort all the nodes by node's node_id
node_states = sorted(state.node_states, key=lambda node: node.node_id)
expected_nodes = sorted(expected_nodes, key=lambda node: node.node_id)
for actual_node, expected_node in zip(node_states, expected_nodes):
assert actual_node.status == expected_node.node_status
if expected_node.idle_time_check_cb:
assert expected_node.idle_time_check_cb(actual_node.idle_duration_ms)
if expected_node.labels:
assert sorted(actual_node.dynamic_labels) == sorted(expected_node.labels)
@dataclass
|
ExpectedNodeState
|
python
|
pytest-dev__pytest-asyncio
|
docs/reference/markers/class_scoped_loop_custom_policies_strict_mode_example.py
|
{
"start": 334,
"end": 453
}
|
class ____:
@pytest.mark.asyncio
async def test_parametrized_loop(self):
pass
|
TestWithDifferentLoopPolicies
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_ssh.py
|
{
"start": 26222,
"end": 31537
}
|
class ____:
def test_load_ssh_public_key_unsupported(self, backend):
ssh_key = b"ecdsa-sha2-junk AAAAE2VjZHNhLXNoYTItbmlzdHAyNTY="
with raises_unsupported_algorithm(None):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_bad_format(self, backend):
ssh_key = b"ssh-rsa not-a-real-key"
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_rsa_too_short(self, backend):
ssh_key = b"ssh-rsa"
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_truncated_int(self, backend):
ssh_key = b"ssh-rsa AAAAB3NzaC1yc2EAAAA="
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
ssh_key = b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAACKr+IHXo"
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_rsa_comment_with_spaces(self, backend):
ssh_key = (
b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDu/XRP1kyK6Cgt36gts9XAk"
b"FiiuJLW6RU0j3KKVZSs1I7Z3UmU9/9aVh/rZV43WQG8jaR6kkcP4stOR0DEtll"
b"PDA7ZRBnrfiHpSQYQ874AZaAoIjgkv7DBfsE6gcDQLub0PFjWyrYQUJhtOLQEK"
b"vY/G0vt2iRL3juawWmCFdTK3W3XvwAdgGk71i6lHt+deOPNEPN2H58E4odrZ2f"
b"sxn/adpDqfb2sM0kPwQs0aWvrrKGvUaustkivQE4XWiSFnB0oJB/lKK/CKVKuy"
b"///ImSCGHQRvhwariN2tvZ6CBNSLh3iQgeB0AkyJlng7MXB2qYq/Ci2FUOryCX"
# Extra section appended
b"2MzHvnbv testkey@localhost extra"
)
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_rsa_extra_data_after_modulo(self, backend):
ssh_key = (
b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDu/XRP1kyK6Cgt36gts9XAk"
b"FiiuJLW6RU0j3KKVZSs1I7Z3UmU9/9aVh/rZV43WQG8jaR6kkcP4stOR0DEtll"
b"PDA7ZRBnrfiHpSQYQ874AZaAoIjgkv7DBfsE6gcDQLub0PFjWyrYQUJhtOLQEK"
b"vY/G0vt2iRL3juawWmCFdTK3W3XvwAdgGk71i6lHt+deOPNEPN2H58E4odrZ2f"
b"sxn/adpDqfb2sM0kPwQs0aWvrrKGvUaustkivQE4XWiSFnB0oJB/lKK/CKVKuy"
b"///ImSCGHQRvhwariN2tvZ6CBNSLh3iQgeB0AkyJlng7MXB2qYq/Ci2FUOryCX"
b"2MzHvnbvAQ== testkey@localhost"
)
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_rsa_different_string(self, backend):
ssh_key = (
# "AAAAB3NzA" the final A is capitalized here to cause the string
# ssh-rsa inside the base64 encoded blob to be incorrect. It should
# be a lower case 'a'.
b"ssh-rsa AAAAB3NzAC1yc2EAAAADAQABAAABAQDDu/XRP1kyK6Cgt36gts9XAk"
b"FiiuJLW6RU0j3KKVZSs1I7Z3UmU9/9aVh/rZV43WQG8jaR6kkcP4stOR0DEtll"
b"PDA7ZRBnrfiHpSQYQ874AZaAoIjgkv7DBfsE6gcDQLub0PFjWyrYQUJhtOLQEK"
b"vY/G0vt2iRL3juawWmCFdTK3W3XvwAdgGk71i6lHt+deOPNEPN2H58E4odrZ2f"
b"sxn/adpDqfb2sM0kPwQs0aWvrrKGvUaustkivQE4XWiSFnB0oJB/lKK/CKVKuy"
b"///ImSCGHQRvhwariN2tvZ6CBNSLh3iQgeB0AkyJlng7MXB2qYq/Ci2FUOryCX"
b"2MzHvnbvAQ== testkey@localhost"
)
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_rsa(self, backend):
ssh_key = (
b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDu/XRP1kyK6Cgt36gts9XAk"
b"FiiuJLW6RU0j3KKVZSs1I7Z3UmU9/9aVh/rZV43WQG8jaR6kkcP4stOR0DEtll"
b"PDA7ZRBnrfiHpSQYQ874AZaAoIjgkv7DBfsE6gcDQLub0PFjWyrYQUJhtOLQEK"
b"vY/G0vt2iRL3juawWmCFdTK3W3XvwAdgGk71i6lHt+deOPNEPN2H58E4odrZ2f"
b"sxn/adpDqfb2sM0kPwQs0aWvrrKGvUaustkivQE4XWiSFnB0oJB/lKK/CKVKuy"
b"///ImSCGHQRvhwariN2tvZ6CBNSLh3iQgeB0AkyJlng7MXB2qYq/Ci2FUOryCX"
b"2MzHvnbv testkey@localhost"
)
key = load_ssh_public_key(ssh_key, backend)
assert key is not None
assert isinstance(key, rsa.RSAPublicKey)
numbers = key.public_numbers()
expected_e = 0x10001
expected_n = int(
"00C3BBF5D13F59322BA0A0B77EA0B6CF570241628AE24B5BA454D"
"23DCA295652B3523B67752653DFFD69587FAD9578DD6406F23691"
"EA491C3F8B2D391D0312D9653C303B651067ADF887A5241843CEF"
"8019680A088E092FEC305FB04EA070340BB9BD0F1635B2AD84142"
"61B4E2D010ABD8FC6D2FB768912F78EE6B05A60857532B75B75EF"
"C007601A4EF58BA947B7E75E38F3443CDD87E7C138A1DAD9D9FB3"
"19FF69DA43A9F6F6B0CD243F042CD1A5AFAEB286BD46AEB2D922B"
"D01385D6892167074A0907F94A2BF08A54ABB2FFFFC89920861D0"
"46F8706AB88DDADBD9E8204D48B87789081E074024C8996783B31"
"7076A98ABF0A2D8550EAF2097D8CCC7BE76EF",
16,
)
expected = rsa.RSAPublicNumbers(expected_e, expected_n)
assert numbers == expected
def test_unsafe_skip_rsa_key_validation(self):
key = load_vectors_from_file(
os.path.join("asymmetric", "OpenSSH", "rsa-nopsw.key"),
lambda f: load_ssh_private_key(
f.read(), password=None, unsafe_skip_rsa_key_validation=True
),
mode="rb",
)
assert isinstance(key, rsa.RSAPrivateKey)
|
TestRSASSHSerialization
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
|
{
"start": 104927,
"end": 105474
}
|
class ____(nn.Module):
def __init__(self, config: Qwen3OmniMoeTalkerConfig):
super().__init__()
self.linear_fc1 = nn.Linear(config.thinker_hidden_size, config.text_config.intermediate_size, bias=True)
self.linear_fc2 = nn.Linear(config.text_config.intermediate_size, config.text_config.hidden_size, bias=True)
self.act_fn = ACT2FN[config.text_config.hidden_act]
def forward(self, hidden_state):
return self.linear_fc2(self.act_fn(self.linear_fc1(hidden_state)))
@dataclass
|
Qwen3OmniMoeTalkerResizeMLP
|
python
|
pypa__pip
|
src/pip/_internal/exceptions.py
|
{
"start": 5316,
"end": 5399
}
|
class ____(PipError):
"""General exception in configuration"""
|
ConfigurationError
|
python
|
getsentry__sentry
|
src/sentry/dynamic_sampling/rules/utils.py
|
{
"start": 3480,
"end": 5947
}
|
class ____(Rule):
timeRange: TimeRange
decayingFn: NotRequired[DecayingFn] # const decaying doesn't require a decayingFn
# Type defining the all the possible rules types that can exist.
PolymorphicRule = Union[Rule, DecayingRule]
def get_rule_hash(rule: PolymorphicRule) -> int:
# We want to be explicit in what we use for computing the hash. In addition, we need to remove certain fields like
# the sampleRate.
return (
orjson.dumps(
{
"id": rule["id"],
"type": rule["type"],
"condition": rule["condition"],
},
option=orjson.OPT_SORT_KEYS,
)
.decode()
.__hash__()
)
def get_user_biases(user_set_biases: list[ActivatableBias] | None) -> list[ActivatableBias]:
if user_set_biases is None:
return DEFAULT_BIASES
id_to_user_bias = {bias["id"]: bias for bias in user_set_biases}
returned_biases = []
for bias in DEFAULT_BIASES:
if bias["id"] in id_to_user_bias:
returned_biases.append(id_to_user_bias[bias["id"]])
else:
returned_biases.append(bias)
return returned_biases
def get_enabled_user_biases(user_set_biases: list[ActivatableBias] | None) -> set[str]:
users_biases = get_user_biases(user_set_biases)
return {bias["id"] for bias in users_biases if bias["active"]}
def get_supported_biases_ids() -> list[str]:
return sorted({bias["id"] for bias in DEFAULT_BIASES})
def apply_dynamic_factor(base_sample_rate: float, x: float) -> float:
"""
This function known as dynamic factor function is used during the rules generation in order to determine the factor
for each rule based on the base_sample_rate of the project.
The high-level idea is that we want to reduce the factor the bigger the base_sample_rate becomes, this is done
because multiplication will exceed 1 very quickly in case we don't reduce the factor.
"""
if x == 0:
raise Exception("A dynamic factor of 0 cannot be set.")
if base_sample_rate < 0.0 or base_sample_rate > 1.0:
raise Exception(
"The dynamic factor function requires a sample rate in the interval [0.0, 1.0]."
)
return float(x / x**base_sample_rate)
def get_redis_client_for_ds() -> StrictRedis[str]:
cluster_key = settings.SENTRY_DYNAMIC_SAMPLING_RULES_REDIS_CLUSTER
return redis.redis_clusters.get(cluster_key)
|
DecayingRule
|
python
|
pytorch__pytorch
|
torch/backends/quantized/__init__.py
|
{
"start": 825,
"end": 1047
}
|
class ____:
def __get__(self, obj, objtype) -> str:
return _get_qengine_str(torch._C._get_qengine())
def __set__(self, obj, val: str) -> None:
torch._C._set_qengine(_get_qengine_id(val))
|
_QEngineProp
|
python
|
pytorch__pytorch
|
test/test_tensorboard.py
|
{
"start": 10243,
"end": 12899
}
|
class ____(BaseTestCase):
@unittest.skipIf(
sys.version_info >= (3, 13),
"numpy failure, likely caused by old tensorboard version",
)
def test_writer(self):
with self.createSummaryWriter() as writer:
sample_rate = 44100
n_iter = 0
writer.add_hparams(
{"lr": 0.1, "bsize": 1}, {"hparam/accuracy": 10, "hparam/loss": 10}
)
writer.add_scalar("data/scalar_systemtime", 0.1, n_iter)
writer.add_scalar("data/scalar_customtime", 0.2, n_iter, walltime=n_iter)
writer.add_scalar("data/new_style", 0.2, n_iter, new_style=True)
writer.add_scalars(
"data/scalar_group",
{
"xsinx": n_iter * np.sin(n_iter),
"xcosx": n_iter * np.cos(n_iter),
"arctanx": np.arctan(n_iter),
},
n_iter,
)
x = np.zeros((32, 3, 64, 64)) # output from network
writer.add_images("Image", x, n_iter) # Tensor
writer.add_image_with_boxes(
"imagebox",
np.zeros((3, 64, 64)),
np.array([[10, 10, 40, 40], [40, 40, 60, 60]]),
n_iter,
)
x = np.zeros(sample_rate * 2)
writer.add_audio("myAudio", x, n_iter)
writer.add_video(
"myVideo", np.random.rand(16, 48, 1, 28, 28).astype(np.float32), n_iter
)
writer.add_text("Text", "text logged at step:" + str(n_iter), n_iter)
writer.add_text("markdown Text", """a|b\n-|-\nc|d""", n_iter)
writer.add_histogram("hist", np.random.rand(100, 100), n_iter)
writer.add_pr_curve(
"xoxo", np.random.randint(2, size=100), np.random.rand(100), n_iter
) # needs tensorboard 0.4RC or later
writer.add_pr_curve_raw(
"prcurve with raw data",
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
n_iter,
)
v = np.array(
[[[1, 1, 1], [-1, -1, 1], [1, -1, -1], [-1, 1, -1]]], dtype=float
)
c = np.array(
[[[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 255]]], dtype=int
)
f = np.array([[[0, 2, 3], [0, 3, 1], [0, 1, 2], [1, 3, 2]]], dtype=int)
writer.add_mesh("my_mesh", vertices=v, colors=c, faces=f)
|
TestTensorBoardWriter
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI026.py
|
{
"start": 459,
"end": 485
}
|
class ____(Enum): ...
|
FooEnum
|
python
|
gevent__gevent
|
src/greentest/3.14/test_ssl.py
|
{
"start": 66293,
"end": 68716
}
|
class ____(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
try:
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
except RuntimeError:
if Py_DEBUG_WIN32:
self.skipTest("not supported on Win32 debug build")
raise
self.assertEqual(cm.exception.library, 'PEM')
regex = "(NO_START_LINE|UNSUPPORTED_PUBLIC_KEY_TYPE)"
self.assertRegex(cm.exception.reason, regex)
s = str(cm.exception)
self.assertIn("NO_START_LINE", s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertStartsWith(s, "The operation did not complete (read)")
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
|
SSLErrorTests
|
python
|
numba__numba
|
numba/tests/test_conversion.py
|
{
"start": 343,
"end": 6900
}
|
class ____(TestCase):
"""
Testing Python to Native conversion
"""
def test_complex_identity(self):
pyfunc = identity
cfunc = njit(types.complex64(types.complex64))(pyfunc)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
self.assertEqual(cfunc(x), x)
for x in np.complex64(xs):
self.assertEqual(cfunc(x), x)
cfunc = njit(types.complex128(types.complex128))(pyfunc)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
self.assertEqual(cfunc(x), x)
for x in np.complex128(xs):
self.assertEqual(cfunc(x), x)
def test_complex_addition(self):
pyfunc = addition
cfunc = njit(types.complex64(types.complex64, types.complex64))(pyfunc)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
y = x
self.assertEqual(cfunc(x, y), x + y)
for x in np.complex64(xs):
y = x
self.assertEqual(cfunc(x, y), x + y)
cfunc = njit(types.complex128(types.complex128,
types.complex128))(pyfunc)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
y = x
self.assertEqual(cfunc(x, y), x + y)
for x in np.complex128(xs):
y = x
self.assertEqual(cfunc(x, y), x + y)
def test_boolean_as_int(self):
pyfunc = equality
cfunc = njit((types.boolean, types.intp))(pyfunc)
xs = True, False
ys = -1, 0, 1
for xs, ys in itertools.product(xs, ys):
self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys))
def test_boolean_as_float(self):
pyfunc = equality
cfunc = njit((types.boolean, types.float64))(pyfunc)
xs = True, False
ys = -1, 0, 1
for xs, ys in itertools.product(xs, ys):
self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys))
def test_boolean_eq_boolean(self):
pyfunc = equality
cfunc = njit((types.boolean, types.boolean))(pyfunc)
xs = True, False
ys = True, False
for xs, ys in itertools.product(xs, ys):
self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys))
# test when a function parameters are jitted as unsigned types
# the function is called with negative parameters the Python error
# that it generates is correctly handled -- a Python error is returned to the user
# For more info, see the comment in Include/longobject.h for _PyArray_AsByteArray
# which PyLong_AsUnsignedLongLong calls
def test_negative_to_unsigned(self):
def f(x):
return x
with self.assertRaises(OverflowError):
jit('uintp(uintp)', nopython=True)(f)(-5)
# test the switch logic in callwraper.py:build_wrapper() works for more than one argument
# and where the error occurs
def test_multiple_args_negative_to_unsigned(self):
pyfunc = foobar
cfunc = njit(types.uint64(types.uint64, types.uint64,
types.uint64),)(pyfunc)
test_fail_args = ((-1, 0, 1), (0, -1, 1), (0, 1, -1))
with self.assertRaises(OverflowError):
for a, b, c in test_fail_args:
cfunc(a, b, c)
# test switch logic of callwraper.py:build_wrapper() with records as function parameters
def test_multiple_args_records(self):
pyfunc = foobar
mystruct_dt = np.dtype([('p', np.float64),
('row', np.float64),
('col', np.float64)])
mystruct = numpy_support.from_dtype(mystruct_dt)
cfunc = njit(mystruct[:](mystruct[:], types.uint64,
types.uint64),)(pyfunc)
st1 = np.recarray(3, dtype=mystruct_dt)
st1.p = np.arange(st1.size) + 1
st1.row = np.arange(st1.size) + 1
st1.col = np.arange(st1.size) + 1
with self.assertRefCount(st1):
test_fail_args = ((st1, -1, 1), (st1, 1, -1))
for a, b, c in test_fail_args:
with self.assertRaises(OverflowError):
cfunc(a, b, c)
del test_fail_args, a, b, c
gc.collect()
# test switch logic of callwraper.py:build_wrapper() with no function parameters
def test_with_no_parameters(self):
def f():
pass
self.assertEqual(f(), jit('()', nopython=True)(f)())
def check_argument_cleanup(self, typ, obj):
"""
Check that argument cleanup doesn't leak references.
"""
def f(x, y):
pass
def _objects(obj):
objs = [obj]
if isinstance(obj, tuple):
for v in obj:
objs += _objects(v)
return objs
objects = _objects(obj)
cfunc = njit((typ, types.uint32))(f)
with self.assertRefCount(*objects):
cfunc(obj, 1)
with self.assertRefCount(*objects):
with self.assertRaises(OverflowError):
cfunc(obj, -1)
cfunc = njit((types.uint32, typ))(f)
with self.assertRefCount(*objects):
cfunc(1, obj)
with self.assertRefCount(*objects):
with self.assertRaises(OverflowError):
cfunc(-1, obj)
def test_cleanup_buffer(self):
mem = memoryview(bytearray(b"xyz"))
self.check_argument_cleanup(types.MemoryView(types.byte, 1, 'C'), mem)
def test_cleanup_record(self):
dtype = np.dtype([('x', np.float64), ('y', np.float64)])
recarr = np.zeros(1, dtype=dtype)
self.check_argument_cleanup(numpy_support.from_dtype(dtype), recarr[0])
def test_cleanup_tuple(self):
mem = memoryview(bytearray(b"xyz"))
tp = types.UniTuple(types.MemoryView(types.byte, 1, 'C'), 2)
self.check_argument_cleanup(tp, (mem, mem))
def test_cleanup_optional(self):
mem = memoryview(bytearray(b"xyz"))
tp = types.Optional(types.MemoryView(types.byte, 1, 'C'))
self.check_argument_cleanup(tp, mem)
def test_stringliteral_to_unicode(self):
# See issue #6907, explicit signature on bar() takes a unicode_type but
# the call to bar() in foo() is with a StringLiteral
@jit(types.void(types.unicode_type), nopython=True)
def bar(string):
pass
@jit(types.void(), nopython=True)
def foo2():
bar("literal string")
if __name__ == '__main__':
unittest.main()
|
TestConversion
|
python
|
pytorch__pytorch
|
torch/testing/_internal/autograd_function_db.py
|
{
"start": 3214,
"end": 4850
}
|
class ____(torch.autograd.Function):
@staticmethod
def forward(x, y):
return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
@staticmethod
def setup_context(ctx, inputs, output):
ctx.save_for_backward(*inputs)
ctx.save_for_forward(*inputs)
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
gx = None
if ctx.needs_input_grad[0]:
gx = NumpyMul.apply(grad_output, y)
gy = None
if ctx.needs_input_grad[1]:
gy = NumpyMul.apply(grad_output, x)
return gx, gy
@staticmethod
def vmap(info, in_dims, x, y):
x_bdim, y_bdim = in_dims
x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
result = NumpyMul.apply(x, y)
result = result.movedim(-1, 0)
return result, 0
@staticmethod
def jvp(ctx, x_tangent, y_tangent):
x, y = ctx.saved_tensors
return x_tangent * y + y_tangent * x
def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Broadcasting
yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))
def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg(4, low=0.9, high=2), args=(), kwargs={"scalar": 3.14})
|
NumpyMul
|
python
|
nedbat__coveragepy
|
tests/test_testing.py
|
{
"start": 1040,
"end": 8326
}
|
class ____(CoverageTest):
"""Test the methods in `CoverageTest`."""
def test_file_exists(self) -> None:
self.make_file("whoville.txt", "We are here!")
self.assert_exists("whoville.txt")
self.assert_doesnt_exist("shadow.txt")
msg = "File 'whoville.txt' shouldn't exist"
with pytest.raises(AssertionError, match=msg):
self.assert_doesnt_exist("whoville.txt")
msg = "File 'shadow.txt' should exist"
with pytest.raises(AssertionError, match=msg):
self.assert_exists("shadow.txt")
def test_file_count(self) -> None:
self.make_file("abcde.txt", "abcde")
self.make_file("axczz.txt", "axczz")
self.make_file("afile.txt", "afile")
self.assert_file_count("a*.txt", 3)
self.assert_file_count("*c*.txt", 2)
self.assert_file_count("afile.*", 1)
self.assert_file_count("*.q", 0)
msg = re.escape(
"There should be 13 files matching 'a*.txt', but there are these: "
+ "['abcde.txt', 'afile.txt', 'axczz.txt']",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("a*.txt", 13)
msg = re.escape(
"There should be 12 files matching '*c*.txt', but there are these: "
+ "['abcde.txt', 'axczz.txt']",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("*c*.txt", 12)
msg = re.escape(
"There should be 11 files matching 'afile.*', but there are these: ['afile.txt']",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("afile.*", 11)
msg = re.escape(
"There should be 10 files matching '*.q', but there are these: []",
)
with pytest.raises(AssertionError, match=msg):
self.assert_file_count("*.q", 10)
def test_assert_recent_datetime(self) -> None:
def now_delta(seconds: int) -> datetime.datetime:
"""Make a datetime `seconds` seconds from now."""
return datetime.datetime.now() + datetime.timedelta(seconds=seconds)
# Default delta is 10 seconds.
self.assert_recent_datetime(now_delta(0))
self.assert_recent_datetime(now_delta(-9))
with pytest.raises(AssertionError):
self.assert_recent_datetime(now_delta(-11))
with pytest.raises(AssertionError):
self.assert_recent_datetime(now_delta(1))
# Delta is settable.
self.assert_recent_datetime(now_delta(0), seconds=120)
self.assert_recent_datetime(now_delta(-100), seconds=120)
with pytest.raises(AssertionError):
self.assert_recent_datetime(now_delta(-1000), seconds=120)
with pytest.raises(AssertionError):
self.assert_recent_datetime(now_delta(1), seconds=120)
def test_assert_warnings(self) -> None:
cov = coverage.Coverage()
# Make a warning, it should catch it properly.
with self.assert_warnings(cov, ["Hello there!"]):
cov._warn("Hello there!")
# The expected warnings are regexes.
with self.assert_warnings(cov, ["Hello.*!"]):
cov._warn("Hello there!")
# There can be a bunch of actual warnings.
with self.assert_warnings(cov, ["Hello.*!"]):
cov._warn("You there?")
cov._warn("Hello there!")
# There can be a bunch of expected warnings.
with self.assert_warnings(cov, ["Hello.*!", "You"]):
cov._warn("You there?")
cov._warn("Hello there!")
# But if there are a bunch of expected warnings, they have to all happen.
warn_regex = r"Didn't find warning 'You' in \['Hello there!'\]"
with pytest.raises(AssertionError, match=warn_regex):
with self.assert_warnings(cov, ["Hello.*!", "You"]):
cov._warn("Hello there!")
# Make a different warning than expected, it should raise an assertion.
warn_regex = r"Didn't find warning 'Not me' in \['Hello there!'\]"
with pytest.raises(AssertionError, match=warn_regex):
with self.assert_warnings(cov, ["Not me"]):
cov._warn("Hello there!")
# Try checking a warning that shouldn't appear: happy case.
with self.assert_warnings(cov, ["Hi"], not_warnings=["Bye"]):
cov._warn("Hi")
# But it should fail if the unexpected warning does appear.
warn_regex = r"Found warning 'Bye' in \['Hi', 'Bye'\]"
with pytest.raises(AssertionError, match=warn_regex):
with self.assert_warnings(cov, ["Hi"], not_warnings=["Bye"]):
cov._warn("Hi")
cov._warn("Bye")
# assert_warnings shouldn't hide a real exception.
with pytest.raises(ZeroDivisionError, match="oops"):
with self.assert_warnings(cov, ["Hello there!"]):
raise ZeroDivisionError("oops")
def test_assert_no_warnings(self) -> None:
cov = coverage.Coverage()
# Happy path: no warnings.
with self.assert_warnings(cov, []):
pass
# If you said there would be no warnings, and there were, fail!
warn_regex = r"Unexpected warnings: \['Watch out!'\]"
with pytest.raises(AssertionError, match=warn_regex):
with self.assert_warnings(cov, []):
cov._warn("Watch out!")
def test_sub_python_is_this_python(self) -> None:
# Try it with a Python command.
self.set_environ("COV_FOOBAR", "XYZZY")
self.make_file(
"showme.py",
"""\
import os, sys
print(sys.executable)
print(os.__file__)
print(os.environ['COV_FOOBAR'])
""",
)
out_lines = self.run_command("python showme.py").splitlines()
assert actual_path(out_lines[0]) == actual_path(sys.executable)
assert out_lines[1] == os.__file__
assert out_lines[2] == "XYZZY"
# Try it with a "coverage debug sys" command.
out = self.run_command("coverage debug sys")
executable = re_line("executable:", out)
executable = executable.split(":", 1)[1].strip()
assert _same_python_executable(executable, sys.executable)
# "environment: COV_FOOBAR = XYZZY" or "COV_FOOBAR = XYZZY"
environ = re_line("COV_FOOBAR", out)
_, _, environ = environ.rpartition(":")
assert environ.strip() == "COV_FOOBAR = XYZZY"
def test_run_command_stdout_stderr(self) -> None:
# run_command should give us both stdout and stderr.
self.make_file(
"outputs.py",
"""\
import sys
sys.stderr.write("StdErr\\n")
print("StdOut")
""",
)
out = self.run_command("python outputs.py")
assert "StdOut\n" in out
assert "StdErr\n" in out
def test_stdout(self) -> None:
# stdout is captured.
print("This is stdout")
print("Line 2")
assert self.stdout() == "This is stdout\nLine 2\n"
# When we grab stdout(), it's reset.
print("Some more")
assert self.stdout() == "Some more\n"
|
CoverageTestTest
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/tests/models.py
|
{
"start": 8842,
"end": 8952
}
|
class ____(ProxyBase):
name = models.CharField(max_length=30)
# base -> proxy -> real models
|
NonProxyChild
|
python
|
ray-project__ray
|
python/ray/autoscaler/v2/instance_manager/node_provider.py
|
{
"start": 9198,
"end": 18610
}
|
class ____(ICloudInstanceProvider):
"""
Warps a NodeProviderV1 to a ICloudInstanceProvider.
TODO(rickyx):
The current adapter right now consists of two sets of APIs:
- v1: the old APIs that are used by the autoscaler, where
we forward the calls to the NodeProviderV1.
- v2: the new APIs that are used by the autoscaler v2, this is
defined in the ICloudInstanceProvider interface.
We should eventually remove the v1 APIs and only use the v2 APIs.
It's currently left as a TODO since changing the v1 APIs would
requires a lot of changes in the cluster launcher codebase.
"""
def __init__(
self,
v1_provider: NodeProviderV1,
config_reader: IConfigReader,
max_launch_batch_per_type: int = AUTOSCALER_MAX_LAUNCH_BATCH,
max_concurrent_launches: int = AUTOSCALER_MAX_CONCURRENT_LAUNCHES,
) -> None:
"""
Args:
v1_provider: The v1 node provider to wrap.
config_reader: The config reader to read the autoscaling config.
max_launch_batch_per_type: The maximum number of nodes to launch per
node type in a single batch.
max_concurrent_launches: The maximum number of concurrent launches.
"""
super().__init__()
self._v1_provider = v1_provider
self._config_reader = config_reader
# Executor to async launching and terminating nodes.
self._main_executor = ThreadPoolExecutor(
max_workers=1, thread_name_prefix="ray::NodeProviderAdapter"
)
# v1 legacy rate limiting on the node provider launch calls.
self._max_launch_batch_per_type = max_launch_batch_per_type
max_batches = math.ceil(
max_concurrent_launches / float(max_launch_batch_per_type)
)
self._node_launcher_executors = ThreadPoolExecutor(
max_workers=max_batches,
thread_name_prefix="ray::NodeLauncherPool",
)
# Queue to retrieve new errors occur in the multi-thread executors
# temporarily.
self._errors_queue = Queue()
@property
def v1_provider(self) -> NodeProviderV1:
return self._v1_provider
def get_non_terminated(self) -> Dict[CloudInstanceId, CloudInstance]:
nodes = {}
cloud_instance_ids = self._v1_non_terminated_nodes({})
# Filter out nodes that are not running.
# This is efficient since the provider is expected to cache the
# running status of the nodes.
for cloud_instance_id in cloud_instance_ids:
node_tags = self._v1_node_tags(cloud_instance_id)
node_kind_tag = node_tags.get(TAG_RAY_NODE_KIND, NODE_KIND_UNMANAGED)
if node_kind_tag == NODE_KIND_UNMANAGED:
# Filter out unmanaged nodes.
continue
elif node_kind_tag == NODE_KIND_WORKER:
node_kind = NodeKind.WORKER
elif node_kind_tag == NODE_KIND_HEAD:
node_kind = NodeKind.HEAD
else:
raise ValueError(f"Invalid node kind: {node_kind_tag}")
nodes[cloud_instance_id] = CloudInstance(
cloud_instance_id=cloud_instance_id,
node_type=node_tags.get(TAG_RAY_USER_NODE_TYPE, ""),
is_running=self._v1_is_running(cloud_instance_id),
request_id=node_tags.get(TAG_RAY_LAUNCH_REQUEST, ""),
node_kind=node_kind,
)
return nodes
def poll_errors(self) -> List[CloudInstanceProviderError]:
errors = []
while not self._errors_queue.empty():
errors.append(self._errors_queue.get_nowait())
return errors
def launch(
self,
shape: Dict[NodeType, int],
request_id: str,
) -> None:
self._main_executor.submit(self._do_launch, shape, request_id)
def terminate(self, ids: List[CloudInstanceId], request_id: str) -> None:
self._main_executor.submit(self._do_terminate, ids, request_id)
###########################################
# Private APIs
###########################################
def _do_launch(
self,
shape: Dict[NodeType, int],
request_id: str,
) -> None:
"""
Launch the cloud instances by calling into the v1 base node provider.
Args:
shape: The requested to launch node type and number of nodes.
request_id: The request id that identifies the request.
"""
for node_type, count in shape.items():
# Keep submitting the launch requests to the launch pool in batches.
while count > 0:
to_launch = min(count, self._max_launch_batch_per_type)
self._node_launcher_executors.submit(
self._launch_nodes_by_type,
node_type,
to_launch,
request_id,
)
count -= to_launch
def _do_terminate(self, ids: List[CloudInstanceId], request_id: str) -> None:
"""
Terminate the cloud instances by calling into the v1 base node provider.
If errors happen during the termination, the errors will be put into the
errors queue.
Args:
ids: The cloud instance ids to terminate.
request_id: The request id that identifies the request.
"""
try:
self._v1_terminate_nodes(ids)
except Exception as e:
for id in ids:
error = TerminateNodeError(id, request_id, int(time.time_ns()))
error.__cause__ = e
self._errors_queue.put(error)
def _launch_nodes_by_type(
self,
node_type: NodeType,
count: int,
request_id: str,
) -> None:
"""
Launch nodes of the given node type.
Args:
node_type: The node type to launch.
count: Number of nodes to launch.
request_id: A unique id that identifies the request.
Raises:
ValueError: If the node type is invalid.
LaunchNodeError: If the launch failed and raised by the underlying provider.
"""
# Check node type is valid.
try:
config = self._config_reader.get_cached_autoscaling_config()
launch_config = config.get_cloud_node_config(node_type)
resources = config.get_node_resources(node_type)
labels = config.get_node_labels(node_type)
# This is to be compatible with the v1 node launcher.
# See more in https://github.com/ray-project/ray/blob/6f5a189bc463e52c51a70f8aea41fb2950b443e8/python/ray/autoscaler/_private/node_launcher.py#L78-L85 # noqa
# TODO: this should be synced with what's stored in the IM, it should
# probably be made as a metadata field in the cloud instance. This is
# another incompatibility with KubeRay.
launch_hash = hash_launch_conf(launch_config, config.get_config("auth", {}))
node_tags = {
TAG_RAY_NODE_NAME: "ray-{}-worker".format(
config.get_config("cluster_name", "")
),
TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
TAG_RAY_LAUNCH_CONFIG: launch_hash,
TAG_RAY_LAUNCH_REQUEST: request_id,
TAG_RAY_USER_NODE_TYPE: node_type,
}
logger.info("Launching {} nodes of type {}.".format(count, node_type))
self._v1_provider.create_node_with_resources_and_labels(
launch_config, node_tags, count, resources, labels
)
logger.info("Launched {} nodes of type {}.".format(count, node_type))
except Exception as e:
logger.info(
"Failed to launch {} nodes of type {}: {}".format(count, node_type, e)
)
error = LaunchNodeError(node_type, count, request_id, int(time.time_ns()))
error.__cause__ = e
self._errors_queue.put(error)
###########################################
# V1 Legacy APIs
###########################################
"""
Below are the necessary legacy APIs from the V1 node provider.
These are needed as of now to provide the needed features
for V2 node provider.
The goal is to eventually remove these APIs and only use the
V2 APIs by modifying the individual node provider to inherit
from ICloudInstanceProvider.
"""
def _v1_terminate_nodes(
self, ids: List[CloudInstanceId]
) -> Optional[Dict[str, Any]]:
return self._v1_provider.terminate_nodes(ids)
def _v1_non_terminated_nodes(
self, tag_filters: Dict[str, str]
) -> List[CloudInstanceId]:
return self._v1_provider.non_terminated_nodes(tag_filters)
def _v1_is_running(self, node_id: CloudInstanceId) -> bool:
return self._v1_provider.is_running(node_id)
def _v1_post_process(self) -> None:
self._v1_provider.post_process()
def _v1_node_tags(self, node_id: CloudInstanceId) -> Dict[str, str]:
return self._v1_provider.node_tags(node_id)
def _v1_safe_to_scale(self) -> bool:
return self._v1_provider.safe_to_scale()
|
NodeProviderAdapter
|
python
|
catalyst-team__catalyst
|
catalyst/core/callback.py
|
{
"start": 5014,
"end": 5230
}
|
class ____(Callback):
"""Checkpoint callback interface, abstraction over checkpoint step."""
def __init__(self):
"""Init."""
super().__init__(order=CallbackOrder.Checkpoint)
|
ICheckpointCallback
|
python
|
pyca__cryptography
|
tests/x509/test_x509.py
|
{
"start": 259851,
"end": 262217
}
|
class ____:
def test_no_attributes(self):
attrs = x509.Attributes([])
assert len(attrs) == 0
def test_get_attribute_for_oid(self):
attr_list = [
x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"nonsense",
),
x509.Attribute(
x509.oid.AttributeOID.UNSTRUCTURED_NAME,
b"montessori",
_ASN1Type.PrintableString.value,
),
]
attrs = x509.Attributes(attr_list)
attr = attrs.get_attribute_for_oid(
x509.oid.AttributeOID.UNSTRUCTURED_NAME
)
assert attr.oid == x509.oid.AttributeOID.UNSTRUCTURED_NAME
assert attr.value == b"montessori"
assert attr._type == _ASN1Type.PrintableString.value
def test_indexing(self):
attr_list = [
x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"nonsense",
),
x509.Attribute(
x509.oid.AttributeOID.UNSTRUCTURED_NAME,
b"montessori",
),
x509.Attribute(
x509.ObjectIdentifier("2.999.2"),
b"meaningless",
),
x509.Attribute(
x509.ObjectIdentifier("2.999.1"),
b"meaningless",
),
]
attrs = x509.Attributes(attr_list)
assert len(attrs) == 4
assert list(attrs) == attr_list
assert attrs[-1] == attrs[3]
assert attrs[0:3:2] == [attrs[0], attrs[2]]
def test_get_attribute_not_found(self):
attrs = x509.Attributes([])
with pytest.raises(x509.AttributeNotFound) as exc:
attrs.get_attribute_for_oid(
x509.oid.AttributeOID.CHALLENGE_PASSWORD
)
assert exc.value.oid == x509.oid.AttributeOID.CHALLENGE_PASSWORD
def test_repr(self):
attrs = x509.Attributes(
[
x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"nonsense",
),
]
)
assert repr(attrs) == (
"<Attributes([<Attribute(oid=<ObjectIdentifier(oid=1.2.840.11354"
"9.1.9.7, name=challengePassword)>, value=b'nonsense')>])>"
)
|
TestAttributes
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 103003,
"end": 103768
}
|
class ____(Structure):
_fields_ = [("count", c_uint),
("sensor", c_nvmlGpuThermalSensor_t * NVML_MAX_THERMAL_SENSORS_PER_GPU)]
_nvmlCoolerControl_t = c_uint
NVML_THERMAL_COOLER_SIGNAL_NONE = 0
NVML_THERMAL_COOLER_SIGNAL_TOGGLE = 1
NVML_THERMAL_COOLER_SIGNAL_VARIABLE = 2
NVML_THERMAL_COOLER_SIGNAL_COUNT = 3
_nvmlCoolerTarget_t = c_uint
NVML_THERMAL_COOLER_TARGET_NONE = (1 << 0)
NVML_THERMAL_COOLER_TARGET_GPU = (1 << 1)
NVML_THERMAL_COOLER_TARGET_MEMORY = (1 << 2)
NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY = (1 << 3)
NVML_THERMAL_COOLER_TARGET_GPU_RELATED = (NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)
|
c_nvmlGpuThermalSettings_t
|
python
|
django__django
|
tests/forms_tests/widget_tests/test_radioselect.py
|
{
"start": 275,
"end": 16967
}
|
class ____(ChoiceWidgetTest):
widget = RadioSelect
def test_render(self):
html = """
<div>
<div>
<label><input type="radio" name="beatle" value="">------</label>
</div>
<div>
<label><input checked type="radio" name="beatle" value="J">John</label>
</div>
<div>
<label><input type="radio" name="beatle" value="P">Paul</label>
</div>
<div>
<label><input type="radio" name="beatle" value="G">George</label>
</div>
<div>
<label><input type="radio" name="beatle" value="R">Ringo</label>
</div>
</div>
"""
beatles_with_blank = BLANK_CHOICE_DASH + self.beatles
for choices in (beatles_with_blank, dict(beatles_with_blank)):
with self.subTest(choices):
self.check_html(self.widget(choices=choices), "beatle", "J", html=html)
def test_nested_choices(self):
nested_choices = (
("unknown", "Unknown"),
("Audio", (("vinyl", "Vinyl"), ("cd", "CD"))),
("Video", (("vhs", "VHS"), ("dvd", "DVD"))),
)
html = """
<div id="media">
<div>
<label for="media_0">
<input type="radio" name="nestchoice" value="unknown" id="media_0"> Unknown
</label></div>
<div>
<label>Audio</label>
<div>
<label for="media_1_0">
<input type="radio" name="nestchoice" value="vinyl" id="media_1_0"> Vinyl
</label></div>
<div> <label for="media_1_1">
<input type="radio" name="nestchoice" value="cd" id="media_1_1"> CD
</label></div>
</div><div>
<label>Video</label>
<div>
<label for="media_2_0">
<input type="radio" name="nestchoice" value="vhs" id="media_2_0"> VHS
</label></div>
<div>
<label for="media_2_1">
<input type="radio" name="nestchoice" value="dvd" id="media_2_1" checked> DVD
</label></div>
</div>
</div>
"""
self.check_html(
self.widget(choices=nested_choices),
"nestchoice",
"dvd",
attrs={"id": "media"},
html=html,
)
def test_render_none(self):
"""
If value is None, none of the options are selected.
"""
choices = BLANK_CHOICE_DASH + self.beatles
html = """
<div>
<div>
<label><input checked type="radio" name="beatle" value="">------</label>
</div>
<div>
<label><input type="radio" name="beatle" value="J">John</label>
</div>
<div>
<label><input type="radio" name="beatle" value="P">Paul</label>
</div>
<div>
<label><input type="radio" name="beatle" value="G">George</label>
</div>
<div>
<label><input type="radio" name="beatle" value="R">Ringo</label>
</div>
</div>
"""
self.check_html(self.widget(choices=choices), "beatle", None, html=html)
def test_render_label_value(self):
"""
If the value corresponds to a label (but not to an option value), none
of the options are selected.
"""
html = """
<div>
<div>
<label><input type="radio" name="beatle" value="J">John</label>
</div>
<div>
<label><input type="radio" name="beatle" value="P">Paul</label>
</div>
<div>
<label><input type="radio" name="beatle" value="G">George</label>
</div>
<div>
<label><input type="radio" name="beatle" value="R">Ringo</label>
</div>
</div>
"""
self.check_html(self.widget(choices=self.beatles), "beatle", "Ringo", html=html)
def test_render_selected(self):
"""
Only one option can be selected.
"""
choices = [("0", "0"), ("1", "1"), ("2", "2"), ("3", "3"), ("0", "extra")]
html = """
<div>
<div>
<label><input checked type="radio" name="choices" value="0">0</label>
</div>
<div>
<label><input type="radio" name="choices" value="1">1</label>
</div>
<div>
<label><input type="radio" name="choices" value="2">2</label>
</div>
<div>
<label><input type="radio" name="choices" value="3">3</label>
</div>
<div>
<label><input type="radio" name="choices" value="0">extra</label>
</div>
</div>
"""
self.check_html(self.widget(choices=choices), "choices", "0", html=html)
def test_constructor_attrs(self):
"""
Attributes provided at instantiation are passed to the constituent
inputs.
"""
widget = self.widget(attrs={"id": "foo"}, choices=self.beatles)
html = """
<div id="foo">
<div>
<label for="foo_0">
<input checked type="radio" id="foo_0" value="J" name="beatle">John</label>
</div>
<div><label for="foo_1">
<input type="radio" id="foo_1" value="P" name="beatle">Paul</label>
</div>
<div><label for="foo_2">
<input type="radio" id="foo_2" value="G" name="beatle">George</label>
</div>
<div><label for="foo_3">
<input type="radio" id="foo_3" value="R" name="beatle">Ringo</label>
</div>
</div>
"""
self.check_html(widget, "beatle", "J", html=html)
def test_compare_to_str(self):
"""
The value is compared to its str().
"""
html = """
<div>
<div>
<label><input type="radio" name="num" value="1">1</label>
</div>
<div>
<label><input type="radio" name="num" value="2">2</label>
</div>
<div>
<label><input checked type="radio" name="num" value="3">3</label>
</div>
</div>
"""
self.check_html(
self.widget(choices=[("1", "1"), ("2", "2"), ("3", "3")]),
"num",
3,
html=html,
)
self.check_html(
self.widget(choices=[(1, 1), (2, 2), (3, 3)]), "num", "3", html=html
)
self.check_html(
self.widget(choices=[(1, 1), (2, 2), (3, 3)]), "num", 3, html=html
)
def test_choices_constructor(self):
widget = self.widget(choices=[(1, 1), (2, 2), (3, 3)])
html = """
<div>
<div>
<label><input type="radio" name="num" value="1">1</label>
</div>
<div>
<label><input type="radio" name="num" value="2">2</label>
</div>
<div>
<label><input checked type="radio" name="num" value="3">3</label>
</div>
</div>
"""
self.check_html(widget, "num", 3, html=html)
def test_choices_constructor_generator(self):
"""
If choices is passed to the constructor and is a generator, it can be
iterated over multiple times without getting consumed.
"""
def get_choices():
for i in range(4):
yield (i, i)
html = """
<div>
<div>
<label><input type="radio" name="num" value="0">0</label>
</div>
<div>
<label><input type="radio" name="num" value="1">1</label>
</div>
<div>
<label><input type="radio" name="num" value="2">2</label>
</div>
<div>
<label><input checked type="radio" name="num" value="3">3</label>
</div>
</div>
"""
widget = self.widget(choices=get_choices())
self.check_html(widget, "num", 3, html=html)
def test_choices_escaping(self):
choices = (("bad", "you & me"), ("good", mark_safe("you > me")))
html = """
<div>
<div>
<label><input type="radio" name="escape" value="bad">you & me</label>
</div>
<div>
<label><input type="radio" name="escape" value="good">you > me</label>
</div>
</div>
"""
self.check_html(self.widget(choices=choices), "escape", None, html=html)
def test_choices_unicode(self):
html = """
<div>
<div>
<label>
<input checked type="radio" name="email"
value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111">
\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label>
</div>
<div>
<label>
<input type="radio" name="email" value="\u0107\u017e\u0161\u0111">
abc\u0107\u017e\u0161\u0111</label>
</div>
</div>
"""
self.check_html(
self.widget(choices=[("ŠĐĆŽćžšđ", "ŠĐabcĆŽćžšđ"), ("ćžšđ", "abcćžšđ")]),
"email",
"ŠĐĆŽćžšđ",
html=html,
)
def test_choices_optgroup(self):
"""
Choices can be nested one level in order to create HTML optgroups.
"""
html = """
<div>
<div>
<label><input type="radio" name="nestchoice" value="outer1">Outer 1</label>
</div>
<div>
<label>Group "1"</label>
<div>
<label>
<input type="radio" name="nestchoice" value="inner1">Inner 1</label>
</div>
<div>
<label>
<input type="radio" name="nestchoice" value="inner2">Inner 2</label>
</div>
</div>
</div>
"""
for widget in self.nested_widgets:
with self.subTest(widget):
self.check_html(widget, "nestchoice", None, html=html)
def test_choices_select_outer(self):
html = """
<div>
<div>
<label>
<input checked type="radio" name="nestchoice" value="outer1">Outer 1</label>
</div>
<div>
<label>Group "1"</label>
<div>
<label>
<input type="radio" name="nestchoice" value="inner1">Inner 1</label>
</div>
<div>
<label>
<input type="radio" name="nestchoice" value="inner2">Inner 2</label>
</div>
</div>
</div>
"""
for widget in self.nested_widgets:
with self.subTest(widget):
self.check_html(widget, "nestchoice", "outer1", html=html)
def test_choices_select_inner(self):
html = """
<div>
<div>
<label><input type="radio" name="nestchoice" value="outer1">Outer 1</label>
</div>
<div>
<label>Group "1"</label>
<div>
<label>
<input type="radio" name="nestchoice" value="inner1">Inner 1</label>
</div>
<div>
<label>
<input checked type="radio" name="nestchoice" value="inner2">Inner 2
</label>
</div>
</div>
</div>
"""
for widget in self.nested_widgets:
with self.subTest(widget):
self.check_html(widget, "nestchoice", "inner2", html=html)
def test_render_attrs(self):
"""
Attributes provided at render-time are passed to the constituent
inputs.
"""
html = """
<div id="bar">
<div>
<label for="bar_0">
<input checked type="radio" id="bar_0" value="J" name="beatle">John</label>
</div>
<div><label for="bar_1">
<input type="radio" id="bar_1" value="P" name="beatle">Paul</label>
</div>
<div><label for="bar_2">
<input type="radio" id="bar_2" value="G" name="beatle">George</label>
</div>
<div><label for="bar_3">
<input type="radio" id="bar_3" value="R" name="beatle">Ringo</label>
</div>
</div>
"""
self.check_html(
self.widget(choices=self.beatles),
"beatle",
"J",
attrs={"id": "bar"},
html=html,
)
def test_class_attrs(self):
"""
The <div> in the multiple_input.html widget template include the class
attribute.
"""
html = """
<div class="bar">
<div><label>
<input checked type="radio" class="bar" value="J" name="beatle">John</label>
</div>
<div><label>
<input type="radio" class="bar" value="P" name="beatle">Paul</label>
</div>
<div><label>
<input type="radio" class="bar" value="G" name="beatle">George</label>
</div>
<div><label>
<input type="radio" class="bar" value="R" name="beatle">Ringo</label>
</div>
</div>
"""
self.check_html(
self.widget(choices=self.beatles),
"beatle",
"J",
attrs={"class": "bar"},
html=html,
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_doesnt_localize_input_value(self):
choices = [
(1, "One"),
(1000, "One thousand"),
(1000000, "One million"),
]
html = """
<div>
<div><label><input type="radio" name="number" value="1">One</label></div>
<div>
<label><input type="radio" name="number" value="1000">One thousand</label>
</div>
<div>
<label><input type="radio" name="number" value="1000000">One million</label>
</div>
</div>
"""
self.check_html(self.widget(choices=choices), "number", None, html=html)
choices = [
(datetime.time(0, 0), "midnight"),
(datetime.time(12, 0), "noon"),
]
html = """
<div>
<div>
<label><input type="radio" name="time" value="00:00:00">midnight</label>
</div>
<div>
<label><input type="radio" name="time" value="12:00:00">noon</label>
</div>
</div>
"""
self.check_html(self.widget(choices=choices), "time", None, html=html)
def test_render_as_subwidget(self):
"""A RadioSelect as a subwidget of MultiWidget."""
choices = BLANK_CHOICE_DASH + self.beatles
html = """
<div>
<div><label>
<input type="radio" name="beatle_0" value="">------</label>
</div>
<div><label>
<input checked type="radio" name="beatle_0" value="J">John</label>
</div>
<div><label>
<input type="radio" name="beatle_0" value="P">Paul</label>
</div>
<div><label>
<input type="radio" name="beatle_0" value="G">George</label>
</div>
<div><label>
<input type="radio" name="beatle_0" value="R">Ringo</label>
</div>
</div>
<input name="beatle_1" type="text" value="Some text">
"""
self.check_html(
MultiWidget([self.widget(choices=choices), TextInput()]),
"beatle",
["J", "Some text"],
html=html,
)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = ChoiceField(
widget=self.widget, choices=self.beatles, required=False
)
form = TestForm()
self.assertIs(self.widget.use_fieldset, True)
self.assertHTMLEqual(
'<div><fieldset><legend>Field:</legend><div id="id_field">'
'<div><label for="id_field_0">'
'<input type="radio" name="field" value="J" id="id_field_0"> John'
'</label></div><div><label for="id_field_1">'
'<input type="radio" name="field" value="P" id="id_field_1">Paul'
'</label></div><div><label for="id_field_2"><input type="radio" '
'name="field" value="G" id="id_field_2"> George</label></div>'
'<div><label for="id_field_3"><input type="radio" name="field" '
'value="R" id="id_field_3">Ringo</label></div></div></fieldset>'
"</div>",
form.render(),
)
|
RadioSelectTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/score-after-flipping-matrix.py
|
{
"start": 34,
"end": 412
}
|
class ____(object):
def matrixScore(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
R, C = len(A), len(A[0])
result = 0
for c in xrange(C):
col = 0
for r in xrange(R):
col += A[r][c] ^ A[r][0]
result += max(col, R-col) * 2**(C-1-c)
return result
|
Solution
|
python
|
eth-brownie__brownie
|
brownie/network/multicall.py
|
{
"start": 925,
"end": 1095
}
|
class ____(ObjectProxy):
"""A proxy object to be updated with the result of a multicall."""
def __repr__(self) -> str:
return repr(self.__wrapped__)
|
Result
|
python
|
great-expectations__great_expectations
|
great_expectations/render/components.py
|
{
"start": 2907,
"end": 4559
}
|
class ____(str, Enum):
"""Available legacy descriptive renderer names"""
COLUMN_PROPERTIES_TABLE_DISTINCT_COUNT_ROW = ".".join(
[
LegacyRendererType.DESCRIPTIVE,
"column_properties_table",
"distinct_count_row",
]
)
COLUMN_PROPERTIES_TABLE_DISTINCT_PERCENT_ROW = ".".join(
[
LegacyRendererType.DESCRIPTIVE,
"column_properties_table",
"distinct_percent_row",
]
)
COLUMN_PROPERTIES_TABLE_MISSING_COUNT_ROW = ".".join(
[LegacyRendererType.DESCRIPTIVE, "column_properties_table", "missing_count_row"]
)
COLUMN_PROPERTIES_TABLE_MISSING_PERCENT_ROW = ".".join(
[
LegacyRendererType.DESCRIPTIVE,
"column_properties_table",
"missing_percent_row",
]
)
COLUMN_PROPERTIES_TABLE_REGEX_COUNT_ROW = ".".join(
[LegacyRendererType.DESCRIPTIVE, "column_properties_table", "regex_count_row"]
)
EXAMPLE_VALUES_BLOCK = ".".join([LegacyRendererType.DESCRIPTIVE, "example_values_block"])
HISTOGRAM = ".".join([LegacyRendererType.DESCRIPTIVE, "histogram"])
QUANTILE_TABLE = ".".join([LegacyRendererType.DESCRIPTIVE, "quantile_table"])
STATS_TABLE_MAX_ROW = ".".join([LegacyRendererType.DESCRIPTIVE, "stats_table", "max_row"])
STATS_TABLE_MEAN_ROW = ".".join([LegacyRendererType.DESCRIPTIVE, "stats_table", "mean_row"])
STATS_TABLE_MIN_ROW = ".".join([LegacyRendererType.DESCRIPTIVE, "stats_table", "min_row"])
VALUE_COUNTS_BAR_CHART = ".".join([LegacyRendererType.DESCRIPTIVE, "value_counts_bar_chart"])
|
LegacyDescriptiveRendererType
|
python
|
kamyu104__LeetCode-Solutions
|
Python/numbers-with-repeated-digits.py
|
{
"start": 35,
"end": 1032
}
|
class ____(object):
def numDupDigitsAtMostN(self, N):
"""
:type N: int
:rtype: int
"""
def P(m, n):
result = 1
for _ in xrange(n):
result *= m
m -= 1
return result
digits = map(int, str(N+1))
result = 0
# Given 321
#
# 1. count numbers without repeated digits:
# - X
# - XX
for i in xrange(1, len(digits)):
result += P(9, 1)*P(9, i-1)
# 2. count numbers without repeated digits:
# - 1XX ~ 3XX
# - 30X ~ 32X
# - 320 ~ 321
prefix_set = set()
for i, x in enumerate(digits):
for y in xrange(1 if i == 0 else 0, x):
if y in prefix_set:
continue
result += P(9-i, len(digits)-i-1)
if x in prefix_set:
break
prefix_set.add(x)
return N-result
|
Solution
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_primitive.py
|
{
"start": 1562,
"end": 3782
}
|
class ____:
def test_valid(self) -> None:
prop = bcpp.Bool()
assert prop.is_valid(False)
assert prop.is_valid(True)
assert prop.is_valid(np.bool_(False))
assert prop.is_valid(np.bool_(True))
def test_invalid(self) -> None:
prop = bcpp.Bool()
assert not prop.is_valid(None)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid(np.int8(0))
assert not prop.is_valid(np.int8(1))
assert not prop.is_valid(np.int16(0))
assert not prop.is_valid(np.int16(1))
assert not prop.is_valid(np.int32(0))
assert not prop.is_valid(np.int32(1))
assert not prop.is_valid(np.int64(0))
assert not prop.is_valid(np.int64(1))
assert not prop.is_valid(np.uint8(0))
assert not prop.is_valid(np.uint8(1))
assert not prop.is_valid(np.uint16(0))
assert not prop.is_valid(np.uint16(1))
assert not prop.is_valid(np.uint32(0))
assert not prop.is_valid(np.uint32(1))
assert not prop.is_valid(np.uint64(0))
assert not prop.is_valid(np.uint64(1))
assert not prop.is_valid(np.float16(0))
assert not prop.is_valid(np.float16(1))
assert not prop.is_valid(np.float32(0))
assert not prop.is_valid(np.float32(1))
assert not prop.is_valid(np.float64(0))
assert not prop.is_valid(np.float64(1))
assert not prop.is_valid(np.complex64(1.0+1.0j))
assert not prop.is_valid(np.complex128(1.0+1.0j))
if hasattr(np, "complex256"):
assert not prop.is_valid(np.complex256(1.0+1.0j))
def test_has_ref(self) -> None:
prop = bcpp.Bool()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpp.Bool()
assert str(prop) == "Bool"
|
Test_Bool
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/imageview/ImageViewTemplate_generic.py
|
{
"start": 341,
"end": 8375
}
|
class ____(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(726, 588)
self.gridLayout_3 = QtWidgets.QGridLayout(Form)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setSpacing(0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.splitter = QtWidgets.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Orientation.Vertical)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.graphicsView = GraphicsView(self.layoutWidget)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout.addWidget(self.graphicsView, 0, 0, 2, 1)
self.histogram = HistogramLUTWidget(self.layoutWidget)
self.histogram.setObjectName("histogram")
self.gridLayout.addWidget(self.histogram, 0, 1, 1, 2)
self.roiBtn = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.roiBtn.sizePolicy().hasHeightForWidth())
self.roiBtn.setSizePolicy(sizePolicy)
self.roiBtn.setCheckable(True)
self.roiBtn.setObjectName("roiBtn")
self.gridLayout.addWidget(self.roiBtn, 1, 1, 1, 1)
self.menuBtn = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.menuBtn.sizePolicy().hasHeightForWidth())
self.menuBtn.setSizePolicy(sizePolicy)
self.menuBtn.setObjectName("menuBtn")
self.gridLayout.addWidget(self.menuBtn, 1, 2, 1, 1)
self.roiPlot = PlotWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.roiPlot.sizePolicy().hasHeightForWidth())
self.roiPlot.setSizePolicy(sizePolicy)
self.roiPlot.setMinimumSize(QtCore.QSize(0, 40))
self.roiPlot.setObjectName("roiPlot")
self.gridLayout_3.addWidget(self.splitter, 0, 0, 1, 1)
self.normGroup = QtWidgets.QGroupBox(Form)
self.normGroup.setObjectName("normGroup")
self.gridLayout_2 = QtWidgets.QGridLayout(self.normGroup)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.normSubtractRadio = QtWidgets.QRadioButton(self.normGroup)
self.normSubtractRadio.setObjectName("normSubtractRadio")
self.gridLayout_2.addWidget(self.normSubtractRadio, 0, 2, 1, 1)
self.normDivideRadio = QtWidgets.QRadioButton(self.normGroup)
self.normDivideRadio.setChecked(False)
self.normDivideRadio.setObjectName("normDivideRadio")
self.gridLayout_2.addWidget(self.normDivideRadio, 0, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.normGroup)
font = QtGui.QFont()
font.setBold(True)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 0, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.normGroup)
font = QtGui.QFont()
font.setBold(True)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.normGroup)
font = QtGui.QFont()
font.setBold(True)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 2, 0, 1, 1)
self.normROICheck = QtWidgets.QCheckBox(self.normGroup)
self.normROICheck.setObjectName("normROICheck")
self.gridLayout_2.addWidget(self.normROICheck, 1, 1, 1, 1)
self.normXBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
self.normXBlurSpin.setObjectName("normXBlurSpin")
self.gridLayout_2.addWidget(self.normXBlurSpin, 2, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.normGroup)
self.label_8.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, 2, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.normGroup)
self.label_9.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.label_9.setObjectName("label_9")
self.gridLayout_2.addWidget(self.label_9, 2, 3, 1, 1)
self.normYBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
self.normYBlurSpin.setObjectName("normYBlurSpin")
self.gridLayout_2.addWidget(self.normYBlurSpin, 2, 4, 1, 1)
self.label_10 = QtWidgets.QLabel(self.normGroup)
self.label_10.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.label_10.setObjectName("label_10")
self.gridLayout_2.addWidget(self.label_10, 2, 5, 1, 1)
self.normOffRadio = QtWidgets.QRadioButton(self.normGroup)
self.normOffRadio.setChecked(True)
self.normOffRadio.setObjectName("normOffRadio")
self.gridLayout_2.addWidget(self.normOffRadio, 0, 3, 1, 1)
self.normTimeRangeCheck = QtWidgets.QCheckBox(self.normGroup)
self.normTimeRangeCheck.setObjectName("normTimeRangeCheck")
self.gridLayout_2.addWidget(self.normTimeRangeCheck, 1, 3, 1, 1)
self.normFrameCheck = QtWidgets.QCheckBox(self.normGroup)
self.normFrameCheck.setObjectName("normFrameCheck")
self.gridLayout_2.addWidget(self.normFrameCheck, 1, 2, 1, 1)
self.normTBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
self.normTBlurSpin.setObjectName("normTBlurSpin")
self.gridLayout_2.addWidget(self.normTBlurSpin, 2, 6, 1, 1)
self.gridLayout_3.addWidget(self.normGroup, 1, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "PyQtGraph"))
self.roiBtn.setText(_translate("Form", "ROI"))
self.menuBtn.setText(_translate("Form", "Menu"))
self.normGroup.setTitle(_translate("Form", "Normalization"))
self.normSubtractRadio.setText(_translate("Form", "Subtract"))
self.normDivideRadio.setText(_translate("Form", "Divide"))
self.label_5.setText(_translate("Form", "Operation:"))
self.label_3.setText(_translate("Form", "Mean:"))
self.label_4.setText(_translate("Form", "Blur:"))
self.normROICheck.setText(_translate("Form", "ROI"))
self.label_8.setText(_translate("Form", "X"))
self.label_9.setText(_translate("Form", "Y"))
self.label_10.setText(_translate("Form", "T"))
self.normOffRadio.setText(_translate("Form", "Off"))
self.normTimeRangeCheck.setText(_translate("Form", "Time range"))
self.normFrameCheck.setText(_translate("Form", "Frame"))
from ..widgets.GraphicsView import GraphicsView
from ..widgets.HistogramLUTWidget import HistogramLUTWidget
from ..widgets.PlotWidget import PlotWidget
|
Ui_Form
|
python
|
wandb__wandb
|
wandb/sdk/data_types/saved_model.py
|
{
"start": 15574,
"end": 16224
}
|
class ____(_SavedModel["tensorflow.keras.Model"]):
_log_type = "tfkeras-model-file"
_path_extension = ""
@staticmethod
def _deserialize(
dir_or_file_path: str,
) -> tensorflow.keras.Model:
return _get_tf_keras().models.load_model(dir_or_file_path)
@staticmethod
def _validate_obj(obj: Any) -> bool:
return isinstance(obj, _get_tf_keras().models.Model)
@staticmethod
def _serialize(model_obj: tensorflow.keras.Model, dir_or_file_path: str) -> None:
_get_tf_keras().models.save_model(
model_obj, dir_or_file_path, include_optimizer=True
)
|
_TensorflowKerasSavedModel
|
python
|
bokeh__bokeh
|
src/bokeh/models/graphs.py
|
{
"start": 4158,
"end": 4514
}
|
class ____(GraphHitTestPolicy):
'''
With the ``EdgesOnly`` policy, only graph edges are able to be selected and
inspected. There is no selection or inspection of graph nodes.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
|
EdgesOnly
|
python
|
huggingface__transformers
|
src/transformers/models/hgnet_v2/modeling_hgnet_v2.py
|
{
"start": 2318,
"end": 3477
}
|
class ____(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
groups: int = 1,
activation: str = "relu",
use_learnable_affine_block: bool = False,
):
super().__init__()
self.convolution = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
groups=groups,
padding=(kernel_size - 1) // 2,
bias=False,
)
self.normalization = nn.BatchNorm2d(out_channels)
self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
if activation and use_learnable_affine_block:
self.lab = HGNetV2LearnableAffineBlock()
else:
self.lab = nn.Identity()
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.lab(hidden_state)
return hidden_state
|
HGNetV2ConvLayer
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
|
{
"start": 15406,
"end": 16092
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "RepositoryNotFoundError"
repository_name = graphene.NonNull(graphene.String)
repository_location_name = graphene.NonNull(graphene.String)
def __init__(self, repository_location_name, repository_name):
super().__init__()
self.repository_name = check.str_param(repository_name, "repository_name")
self.repository_location_name = check.str_param(
repository_location_name, "repository_location_name"
)
self.message = f"Could not find Repository {repository_location_name}.{repository_name}"
|
GrapheneRepositoryNotFoundError
|
python
|
falconry__falcon
|
falcon/errors.py
|
{
"start": 4713,
"end": 4983
}
|
class ____(WebSocketDisconnected):
"""No route could be found for the requested path.
A simulated WebSocket connection was attempted but the path specified in
the handshake request did not match any of the app's routes.
"""
pass
|
WebSocketPathNotFound
|
python
|
urllib3__urllib3
|
test/test_response.py
|
{
"start": 53630,
"end": 56196
}
|
class ____:
def __init__(self, content: list[bytes]) -> None:
"""
content: collection of str, each str is a chunk in response
"""
self.content = content
self.index = 0 # This class iterates over self.content.
self.closed = False
self.cur_chunk = b""
self.chunks_exhausted = False
def _encode_chunk(self, chunk: bytes) -> bytes:
# In the general case, we can't decode the chunk to unicode
length = f"{len(chunk):X}\r\n"
return length.encode() + chunk + b"\r\n"
def _pop_new_chunk(self) -> bytes:
if self.chunks_exhausted:
return b""
try:
chunk = self.content[self.index]
except IndexError:
chunk = b""
self.chunks_exhausted = True
else:
self.index += 1
chunk = self._encode_chunk(chunk)
if not isinstance(chunk, bytes):
chunk = chunk.encode()
assert isinstance(chunk, bytes)
return chunk
def pop_current_chunk(self, amt: int = -1, till_crlf: bool = False) -> bytes:
if amt > 0 and till_crlf:
raise ValueError("Can't specify amt and till_crlf.")
if len(self.cur_chunk) <= 0:
self.cur_chunk = self._pop_new_chunk()
if till_crlf:
try:
i = self.cur_chunk.index(b"\r\n")
except ValueError:
# No CRLF in current chunk -- probably caused by encoder.
self.cur_chunk = b""
return b""
else:
chunk_part = self.cur_chunk[: i + 2]
self.cur_chunk = self.cur_chunk[i + 2 :]
return chunk_part
elif amt <= -1:
chunk_part = self.cur_chunk
self.cur_chunk = b""
return chunk_part
else:
try:
chunk_part = self.cur_chunk[:amt]
except IndexError:
chunk_part = self.cur_chunk
self.cur_chunk = b""
else:
self.cur_chunk = self.cur_chunk[amt:]
return chunk_part
def readline(self) -> bytes:
return self.pop_current_chunk(till_crlf=True)
def read(self, amt: int = -1) -> bytes:
return self.pop_current_chunk(amt)
def read1(self, amt: int = -1) -> bytes:
return self.pop_current_chunk(amt)
def flush(self) -> None:
# Python 3 wants this method.
pass
def close(self) -> None:
self.closed = True
|
MockChunkedEncodingResponse
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.